diff --git a/.backportrc.json b/.backportrc.json index 03f3f892f9227..20287f0bfc0e6 100644 --- a/.backportrc.json +++ b/.backportrc.json @@ -1,10 +1,10 @@ { "upstream" : "elastic/elasticsearch", - "targetBranchChoices" : [ "main", "8.x", "8.16", "8.15", "8.14", "8.13", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "6.8" ], + "targetBranchChoices" : [ "main", "8.x", "8.17", "8.16", "8.15", "8.14", "8.13", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "6.8" ], "targetPRLabels" : [ "backport" ], "branchLabelMapping" : { "^v9.0.0$" : "main", - "^v8.17.0$" : "8.x", + "^v8.18.0$" : "8.x", "^v(\\d+).(\\d+).\\d+(?:-(?:alpha|beta|rc)\\d+)?$" : "$1.$2" } } diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index 0ece129a3c238..f25092bc6d42f 100644 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -47,6 +47,8 @@ export GRADLE_BUILD_CACHE_PASSWORD BUILDKITE_API_TOKEN=$(vault read -field=token secret/ci/elastic-elasticsearch/buildkite-api-token) export BUILDKITE_API_TOKEN +export GH_TOKEN="$VAULT_GITHUB_TOKEN" + if [[ "${USE_LUCENE_SNAPSHOT_CREDS:-}" == "true" ]]; then data=$(.buildkite/scripts/get-legacy-secret.sh aws-elastic/creds/lucene-snapshots) @@ -117,3 +119,5 @@ if [[ -f /etc/os-release ]] && grep -q '"Amazon Linux 2"' /etc/os-release; then echo "$(hostname -i | cut -d' ' -f 2) $(hostname -f)." | sudo tee /etc/dnsmasq.hosts sudo systemctl restart dnsmasq.service fi + +.buildkite/scripts/get-latest-test-mutes.sh diff --git a/.buildkite/hooks/pre-command.bat b/.buildkite/hooks/pre-command.bat index fe7c2371de0e5..752c2bf23eb14 100644 --- a/.buildkite/hooks/pre-command.bat +++ b/.buildkite/hooks/pre-command.bat @@ -15,9 +15,12 @@ set BUILD_NUMBER=%BUILDKITE_BUILD_NUMBER% set COMPOSE_HTTP_TIMEOUT=120 set JOB_BRANCH=%BUILDKITE_BRANCH% +set GH_TOKEN=%VAULT_GITHUB_TOKEN% + set GRADLE_BUILD_CACHE_USERNAME=vault read -field=username secret/ci/elastic-elasticsearch/migrated/gradle-build-cache set GRADLE_BUILD_CACHE_PASSWORD=vault read -field=password secret/ci/elastic-elasticsearch/migrated/gradle-build-cache bash.exe -c "nohup bash .buildkite/scripts/setup-monitoring.sh /dev/null 2>&1 &" +bash.exe -c "bash .buildkite/scripts/get-latest-test-mutes.sh" exit /b 0 diff --git a/.buildkite/pipelines/intake.template.yml b/.buildkite/pipelines/intake.template.yml index 57412bbe908bc..9d7cf3c7e0083 100644 --- a/.buildkite/pipelines/intake.template.yml +++ b/.buildkite/pipelines/intake.template.yml @@ -1,6 +1,6 @@ steps: - label: sanity-check - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files precommit + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints precommit timeout_in_minutes: 300 agents: provider: gcp @@ -9,7 +9,7 @@ steps: buildDirectory: /dev/shm/bk - wait - label: part1 - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart1 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints checkPart1 timeout_in_minutes: 300 agents: provider: gcp @@ -17,7 +17,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk - label: part2 - 
command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart2 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints checkPart2 timeout_in_minutes: 300 agents: provider: gcp @@ -25,7 +25,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk - label: part3 - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart3 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints checkPart3 timeout_in_minutes: 300 agents: provider: gcp @@ -33,7 +33,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk - label: part4 - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart4 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints checkPart4 timeout_in_minutes: 300 agents: provider: gcp @@ -41,7 +41,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk - label: part5 - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart5 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints checkPart5 timeout_in_minutes: 300 agents: provider: gcp @@ -51,7 +51,7 @@ steps: - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files v$$BWC_VERSION#bwcTest + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints v$$BWC_VERSION#bwcTest timeout_in_minutes: 300 matrix: setup: @@ -64,7 +64,7 @@ steps: env: BWC_VERSION: "{{matrix.BWC_VERSION}}" - label: rest-compat - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkRestCompat + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints checkRestCompat timeout_in_minutes: 300 agents: provider: gcp diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 19e99852869e6..6c8b8edfcbac1 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -1,7 +1,7 @@ # This file is auto-generated. 
See .buildkite/pipelines/intake.template.yml steps: - label: sanity-check - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files precommit + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints precommit timeout_in_minutes: 300 agents: provider: gcp @@ -10,7 +10,7 @@ steps: buildDirectory: /dev/shm/bk - wait - label: part1 - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart1 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints checkPart1 timeout_in_minutes: 300 agents: provider: gcp @@ -18,7 +18,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk - label: part2 - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart2 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints checkPart2 timeout_in_minutes: 300 agents: provider: gcp @@ -26,7 +26,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk - label: part3 - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart3 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints checkPart3 timeout_in_minutes: 300 agents: provider: gcp @@ -34,7 +34,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk - label: part4 - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart4 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints checkPart4 timeout_in_minutes: 300 agents: provider: gcp @@ -42,7 +42,7 @@ steps: machineType: n1-standard-32 buildDirectory: /dev/shm/bk - label: part5 - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart5 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints checkPart5 timeout_in_minutes: 300 agents: provider: gcp @@ -52,11 +52,11 @@ steps: - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files v$$BWC_VERSION#bwcTest + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints v$$BWC_VERSION#bwcTest timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["8.16.1", "8.17.0", "9.0.0"] + BWC_VERSION: ["8.16.2", "8.17.0", "8.18.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -65,7 +65,7 @@ steps: env: BWC_VERSION: 
"{{matrix.BWC_VERSION}}" - label: rest-compat - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkRestCompat + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints checkRestCompat timeout_in_minutes: 300 agents: provider: gcp diff --git a/.buildkite/pipelines/lucene-snapshot/run-tests.yml b/.buildkite/pipelines/lucene-snapshot/run-tests.yml index f7293e051467c..ddc63419a2e2f 100644 --- a/.buildkite/pipelines/lucene-snapshot/run-tests.yml +++ b/.buildkite/pipelines/lucene-snapshot/run-tests.yml @@ -1,6 +1,6 @@ steps: - label: sanity-check - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files precommit + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints precommit timeout_in_minutes: 300 agents: provider: gcp @@ -9,7 +9,7 @@ steps: buildDirectory: /dev/shm/bk - wait: null - label: part1 - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart1 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints checkPart1 timeout_in_minutes: 300 agents: provider: gcp @@ -17,7 +17,7 @@ steps: machineType: custom-32-98304 buildDirectory: /dev/shm/bk - label: part2 - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart2 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints checkPart2 timeout_in_minutes: 300 agents: provider: gcp @@ -25,7 +25,7 @@ steps: machineType: custom-32-98304 buildDirectory: /dev/shm/bk - label: part3 - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart3 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints checkPart3 timeout_in_minutes: 300 agents: provider: gcp @@ -33,7 +33,7 @@ steps: machineType: custom-32-98304 buildDirectory: /dev/shm/bk - label: part4 - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart4 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints checkPart4 timeout_in_minutes: 300 agents: provider: gcp @@ -41,7 +41,7 @@ steps: machineType: custom-32-98304 buildDirectory: /dev/shm/bk - label: part5 - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart5 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints checkPart5 timeout_in_minutes: 300 agents: provider: gcp @@ -51,7 +51,7 @@ steps: - group: 
bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files v$$BWC_VERSION#bwcTest + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints v$$BWC_VERSION#bwcTest timeout_in_minutes: 300 matrix: setup: @@ -66,7 +66,7 @@ steps: env: BWC_VERSION: "{{matrix.BWC_VERSION}}" - label: rest-compat - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkRestCompat + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-file-fingerprints checkRestCompat timeout_in_minutes: 300 agents: provider: gcp diff --git a/.buildkite/pipelines/periodic-packaging.template.yml b/.buildkite/pipelines/periodic-packaging.template.yml index 081d059460653..1a1e46d55f7a4 100644 --- a/.buildkite/pipelines/periodic-packaging.template.yml +++ b/.buildkite/pipelines/periodic-packaging.template.yml @@ -8,6 +8,7 @@ steps: setup: image: - debian-11 + - debian-12 - opensuse-leap-15 - oraclelinux-7 - oraclelinux-8 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 7dd8269f4ffe6..c1b10a46c62a7 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -9,6 +9,7 @@ steps: setup: image: - debian-11 + - debian-12 - opensuse-leap-15 - oraclelinux-7 - oraclelinux-8 @@ -272,8 +273,8 @@ steps: env: BWC_VERSION: 8.14.3 - - label: "{{matrix.image}} / 8.15.4 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.4 + - label: "{{matrix.image}} / 8.15.6 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.6 timeout_in_minutes: 300 matrix: setup: @@ -286,10 +287,10 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.15.4 + BWC_VERSION: 8.15.6 - - label: "{{matrix.image}} / 8.16.1 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.1 + - label: "{{matrix.image}} / 8.16.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.2 timeout_in_minutes: 300 matrix: setup: @@ -302,7 +303,7 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.16.1 + BWC_VERSION: 8.16.2 - label: "{{matrix.image}} / 8.17.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.17.0 @@ -320,6 +321,22 @@ steps: env: BWC_VERSION: 8.17.0 + - label: "{{matrix.image}} / 8.18.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.18.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.18.0 + - label: "{{matrix.image}} / 9.0.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh 
-Dbwc.checkout.align=true destructiveDistroUpgradeTest.v9.0.0 timeout_in_minutes: 300 diff --git a/.buildkite/pipelines/periodic-platform-support.yml b/.buildkite/pipelines/periodic-platform-support.yml index f9f75488f0917..79e5a2e8dcdbb 100644 --- a/.buildkite/pipelines/periodic-platform-support.yml +++ b/.buildkite/pipelines/periodic-platform-support.yml @@ -8,6 +8,7 @@ steps: setup: image: - debian-11 + - debian-12 - opensuse-leap-15 - oraclelinux-7 - oraclelinux-8 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 79371d6ddccf5..69d11ef1dabb6 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -287,8 +287,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 8.15.4 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.4#bwcTest + - label: 8.15.6 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.6#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -297,7 +297,7 @@ steps: buildDirectory: /dev/shm/bk preemptible: true env: - BWC_VERSION: 8.15.4 + BWC_VERSION: 8.15.6 retry: automatic: - exit_status: "-1" @@ -306,8 +306,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 8.16.1 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.16.1#bwcTest + - label: 8.16.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.16.2#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -316,7 +316,7 @@ steps: buildDirectory: /dev/shm/bk preemptible: true env: - BWC_VERSION: 8.16.1 + BWC_VERSION: 8.16.2 retry: automatic: - exit_status: "-1" @@ -344,6 +344,25 @@ steps: - signal_reason: agent_stop limit: 3 + - label: 8.18.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.18.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + preemptible: true + env: + BWC_VERSION: 8.18.0 + retry: + automatic: + - exit_status: "-1" + limit: 3 + signal_reason: none + - signal_reason: agent_stop + limit: 3 + - label: 9.0.0 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v9.0.0#bwcTest timeout_in_minutes: 300 @@ -429,7 +448,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk21 - BWC_VERSION: ["8.16.1", "8.17.0", "9.0.0"] + BWC_VERSION: ["8.16.2", "8.17.0", "8.18.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -471,7 +490,7 @@ steps: ES_RUNTIME_JAVA: - openjdk21 - openjdk23 - BWC_VERSION: ["8.16.1", "8.17.0", "9.0.0"] + BWC_VERSION: ["8.16.2", "8.17.0", "8.18.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/pull-request/packaging-tests-unix.yml b/.buildkite/pipelines/pull-request/packaging-tests-unix.yml index 04ccc41891b3b..ffc1350aceab3 100644 --- a/.buildkite/pipelines/pull-request/packaging-tests-unix.yml +++ b/.buildkite/pipelines/pull-request/packaging-tests-unix.yml @@ -3,68 +3,15 @@ config: steps: - group: packaging-tests-unix steps: - - label: "{{matrix.image}} / docker / packaging-tests-unix" - key: "packaging-tests-unix-docker" - command: ./.ci/scripts/packaging-test.sh destructiveDistroTest.docker-cloud-ess - timeout_in_minutes: 300 - matrix: - setup: - image: - - debian-11 - - opensuse-leap-15 - - oraclelinux-7 - - oraclelinux-8 - - sles-12 - - sles-15 - - ubuntu-1804 - - ubuntu-2004 - - ubuntu-2204 - - rocky-8 - - rocky-9 - - rhel-7 - - rhel-8 - - rhel-9 - - almalinux-8 - agents: - provider: gcp - 
image: family/elasticsearch-{{matrix.image}} - diskSizeGb: 350 - machineType: custom-16-32768 - - label: "{{matrix.image}} / packages / packaging-tests-unix" - key: "packaging-tests-unix-packages" - command: ./.ci/scripts/packaging-test.sh destructiveDistroTest.packages - timeout_in_minutes: 300 - matrix: - setup: - image: - - debian-11 - - opensuse-leap-15 - - oraclelinux-7 - - oraclelinux-8 - - sles-12 - - sles-15 - - ubuntu-1804 - - ubuntu-2004 - - ubuntu-2204 - - rocky-8 - - rocky-9 - - rhel-7 - - rhel-8 - - rhel-9 - - almalinux-8 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - diskSizeGb: 350 - machineType: custom-16-32768 - - label: "{{matrix.image}} / archives / packaging-tests-unix" - key: "packaging-tests-unix-archives" - command: ./.ci/scripts/packaging-test.sh destructiveDistroTest.archives + - label: "{{matrix.image}} / {{matrix.PACKAGING_TASK}} / packaging-tests-unix" + key: "packaging-tests-unix" + command: ./.ci/scripts/packaging-test.sh destructiveDistroTest.{{matrix.PACKAGING_TASK}} timeout_in_minutes: 300 matrix: setup: image: - debian-11 + - debian-12 - opensuse-leap-15 - oraclelinux-7 - oraclelinux-8 @@ -79,6 +26,11 @@ steps: - rhel-8 - rhel-9 - almalinux-8 + PACKAGING_TASK: + - docker + - docker-cloud-ess + - packages + - archives agents: provider: gcp image: family/elasticsearch-{{matrix.image}} diff --git a/.buildkite/scripts/dra-workflow.sh b/.buildkite/scripts/dra-workflow.sh index 81b8225e443a4..f2dc40ca1927f 100755 --- a/.buildkite/scripts/dra-workflow.sh +++ b/.buildkite/scripts/dra-workflow.sh @@ -6,7 +6,7 @@ WORKFLOW="${DRA_WORKFLOW:-snapshot}" BRANCH="${BUILDKITE_BRANCH:-}" # Don't publish main branch to staging -if [[ "$BRANCH" == "main" && "$WORKFLOW" == "staging" ]]; then +if [[ ("$BRANCH" == "main" || "$BRANCH" == *.x) && "$WORKFLOW" == "staging" ]]; then exit 0 fi diff --git a/.buildkite/scripts/get-latest-test-mutes.sh b/.buildkite/scripts/get-latest-test-mutes.sh new file mode 100755 index 0000000000000..5721e29f1b773 --- /dev/null +++ b/.buildkite/scripts/get-latest-test-mutes.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +if [[ ! "${BUILDKITE_PULL_REQUEST:-}" || "${BUILDKITE_AGENT_META_DATA_PROVIDER:-}" == "k8s" ]]; then + exit 0 +fi + +testMuteBranch="${BUILDKITE_PULL_REQUEST_BASE_BRANCH:-main}" +testMuteFile="$(mktemp)" + +# If this PR contains changes to muted-tests.yml, we disable this functionality +# Otherwise, we wouldn't be able to test unmutes +if [[ ! 
$(gh pr diff "$BUILDKITE_PULL_REQUEST" --name-only | grep 'muted-tests.yml') ]]; then + gh api -H 'Accept: application/vnd.github.v3.raw' "repos/elastic/elasticsearch/contents/muted-tests.yml?ref=$testMuteBranch" > "$testMuteFile" + + if [[ -s "$testMuteFile" ]]; then + mkdir -p ~/.gradle + # This is using gradle.properties instead of an env var so that it's easily compatible with the Windows pre-command hook + echo "org.gradle.project.org.elasticsearch.additional.muted.tests=$testMuteFile" >> ~/.gradle/gradle.properties + fi +fi diff --git a/.buildkite/scripts/gradle-build-cache-validation.sh b/.buildkite/scripts/gradle-build-cache-validation.sh index 75dc9b264b8bc..3c5021e436e4a 100755 --- a/.buildkite/scripts/gradle-build-cache-validation.sh +++ b/.buildkite/scripts/gradle-build-cache-validation.sh @@ -2,18 +2,17 @@ set -euo pipefail -VALIDATION_SCRIPTS_VERSION=2.5.1 +VALIDATION_SCRIPTS_VERSION=2.7.1 GRADLE_ENTERPRISE_ACCESS_KEY=$(vault kv get -field=value secret/ci/elastic-elasticsearch/gradle-enterprise-api-key) export GRADLE_ENTERPRISE_ACCESS_KEY - -curl -s -L -O https://github.com/gradle/gradle-enterprise-build-validation-scripts/releases/download/v$VALIDATION_SCRIPTS_VERSION/gradle-enterprise-gradle-build-validation-$VALIDATION_SCRIPTS_VERSION.zip && unzip -q -o gradle-enterprise-gradle-build-validation-$VALIDATION_SCRIPTS_VERSION.zip +curl -s -L -O https://github.com/gradle/gradle-enterprise-build-validation-scripts/releases/download/v$VALIDATION_SCRIPTS_VERSION/develocity-gradle-build-validation-$VALIDATION_SCRIPTS_VERSION.zip && unzip -q -o develocity-gradle-build-validation-$VALIDATION_SCRIPTS_VERSION.zip # Create a temporary file tmpOutputFile=$(mktemp) trap "rm $tmpOutputFile" EXIT set +e -gradle-enterprise-gradle-build-validation/03-validate-local-build-caching-different-locations.sh -r https://github.com/elastic/elasticsearch.git -b $BUILDKITE_BRANCH --gradle-enterprise-server https://gradle-enterprise.elastic.co -t precommit --fail-if-not-fully-cacheable | tee $tmpOutputFile +develocity-gradle-build-validation/03-validate-local-build-caching-different-locations.sh -r https://github.com/elastic/elasticsearch.git -b $BUILDKITE_BRANCH --develocity-server https://gradle-enterprise.elastic.co -t precommit --fail-if-not-fully-cacheable | tee $tmpOutputFile # Capture the return value retval=$? 
set -e diff --git a/.buildkite/scripts/gradle-configuration-cache-validation.sh b/.buildkite/scripts/gradle-configuration-cache-validation.sh index 8249155c5ffc5..55a4b18a1e887 100755 --- a/.buildkite/scripts/gradle-configuration-cache-validation.sh +++ b/.buildkite/scripts/gradle-configuration-cache-validation.sh @@ -2,18 +2,17 @@ set -euo pipefail -# TODO/ FIXIT without a full resolved gradle home, we see issues configuration cache reuse -./gradlew --max-workers=8 --parallel --scan --no-daemon precommit +# This is a workaround for https://github.com/gradle/gradle/issues/28159 +.ci/scripts/run-gradle.sh --no-daemon precommit -./gradlew --max-workers=8 --parallel --scan --configuration-cache precommit -Dorg.gradle.configuration-cache.inputs.unsafe.ignore.file-system-checks=build/*.tar.bz2 +.ci/scripts/run-gradle.sh --configuration-cache precommit -Dorg.gradle.configuration-cache.inputs.unsafe.ignore.file-system-checks=build/*.tar.bz2 # Create a temporary file tmpOutputFile=$(mktemp) trap "rm $tmpOutputFile" EXIT echo "2nd run" -# TODO run-gradle.sh script causes issues because of init script handling -./gradlew --max-workers=8 --parallel --scan --configuration-cache precommit -Dorg.gradle.configuration-cache.inputs.unsafe.ignore.file-system-checks=build/*.tar.bz2 | tee $tmpOutputFile +.ci/scripts/run-gradle.sh --configuration-cache precommit -Dorg.gradle.configuration-cache.inputs.unsafe.ignore.file-system-checks=build/*.tar.bz2 | tee $tmpOutputFile # Check if the command was successful if grep -q "Configuration cache entry reused." $tmpOutputFile; then diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 85522e47a523f..826091807ce57 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -14,7 +14,8 @@ BWC_VERSION: - "8.12.2" - "8.13.4" - "8.14.3" - - "8.15.4" - - "8.16.1" + - "8.15.6" + - "8.16.2" - "8.17.0" + - "8.18.0" - "9.0.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 9ea3072021bb3..5514fc376a285 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,4 +1,5 @@ BWC_VERSION: - - "8.16.1" + - "8.16.2" - "8.17.0" + - "8.18.0" - "9.0.0" diff --git a/.editorconfig b/.editorconfig index cf4f74744d2b4..774fd201ef8d5 100644 --- a/.editorconfig +++ b/.editorconfig @@ -209,7 +209,7 @@ indent_size = 4 max_line_length = 140 ij_java_class_count_to_use_import_on_demand = 999 ij_java_names_count_to_use_import_on_demand = 999 -ij_java_imports_layout = *,|,com.**,|,org.**,|,java.**,javax.**,|,$* +ij_java_imports_layout = *,|,com.**,|,org.**,|,java.**,|,javax.**,|,$* [*.json] indent_size = 2 diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 3a066be2f8ea9..6f2dc3c64febe 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -472,7 +472,7 @@ You can run a group of YAML tests by using wildcards: --tests "org.elasticsearch.test.rest.ClientYamlTestSuiteIT.test {yaml=index/*/*}" --------------------------------------------------------------------------- -or +or --------------------------------------------------------------------------- ./gradlew :rest-api-spec:yamlRestTest \ @@ -564,8 +564,8 @@ Sometimes a backward compatibility change spans two versions. A common case is new functionality that needs a BWC bridge in an unreleased version of a release branch (for example, 5.x). Another use case, since the introduction of serverless, is to test BWC against main in addition to the other released branches. To do so, specify the `bwc.refspec` remote and branch to use for the BWC build as `origin/main`. 
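The `bwc.refspec` workflow described above can be exercised from the command line. A hypothetical invocation, assuming the system property follows the `bwc.refspec.<branch>` form referenced here and using the 8.18.0 snapshot version added elsewhere in this diff (both the branch key and the version are illustrative):

```bash
# Make sure the ref to test against exists locally, then point the BWC checkout at it.
git fetch origin main
./gradlew v8.18.0#bwcTest -Dbwc.checkout.align=true -Dbwc.refspec.8.x=origin/main
```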
-To test against main, you will also need to create a new version in link:./server/src/main/java/org/elasticsearch/Version.java[Version.java], -increment `elasticsearch` in link:./build-tools-internal/version.properties[version.properties], and hard-code the `project.version` for ml-cpp +To test against main, you will also need to create a new version in link:./server/src/main/java/org/elasticsearch/Version.java[Version.java], +increment `elasticsearch` in link:./build-tools-internal/version.properties[version.properties], and hard-code the `project.version` for ml-cpp in link:./x-pack/plugin/ml/build.gradle[ml/build.gradle]. In general, to test the changes, you can instruct Gradle to build the BWC version from another remote/branch combination instead of pulling the release branch from GitHub. @@ -625,7 +625,7 @@ For specific YAML rest tests one can use For disabling entire types of tests for subprojects, one can use for example: ------------------------------------------------ -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm) { // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("javaRestTest").configure{enabled = false } } diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index f3ced9f16d327..25cfae6c9803a 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -1,4 +1,3 @@ -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.TestUtil /* @@ -78,7 +77,7 @@ tasks.register("copyPainless", Copy) { } tasks.named("run").configure { - executable = "${BuildParams.runtimeJavaHome}/bin/java" + executable = "${buildParams.runtimeJavaHome.get()}/bin/java" args << "-Dplugins.dir=${buildDir}/plugins" << "-Dtests.index=${buildDir}/index" dependsOn "copyExpression", "copyPainless", configurations.nativeLib systemProperty 'es.nativelibs.path', TestUtil.getTestLibraryPath(file("../libs/native/libraries/build/platform/").toString()) diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java index d3f210f774782..74cea5d5f1549 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/MapperServiceFactory.java @@ -10,7 +10,6 @@ package org.elasticsearch.benchmark.index.mapper; import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.apache.lucene.util.Accountable; import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -28,7 +27,6 @@ import org.elasticsearch.index.mapper.MapperRegistry; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ProvidedIdFieldMapper; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.script.Script; @@ -56,13 +54,7 @@ public static MapperService create(String mappings) { MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); SimilarityService similarityService = new SimilarityService(indexSettings, null, Map.of()); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, 
Accountable accountable) {} - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); MapperService mapperService = new MapperService( () -> TransportVersion.current(), indexSettings, diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java index 3790be5f279d1..b44f04c3a26a4 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.plugins.PluginsLoader; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.script.DocReader; @@ -76,8 +77,7 @@ public class ScriptScoreBenchmark { private final PluginsService pluginsService = new PluginsService( Settings.EMPTY, null, - null, - Path.of(System.getProperty("plugins.dir")) + PluginsLoader.createPluginsLoader(null, Path.of(System.getProperty("plugins.dir"))) ); private final ScriptModule scriptModule = new ScriptModule(Settings.EMPTY, pluginsService.filterPlugins(ScriptPlugin.class).toList()); diff --git a/branches.json b/branches.json index e81d511a88458..0e23a795664dd 100644 --- a/branches.json +++ b/branches.json @@ -7,6 +7,9 @@ { "branch": "8.16" }, + { + "branch": "8.17" + }, { "branch": "8.x" }, diff --git a/build-conventions/build.gradle b/build-conventions/build.gradle index d8c211c0f02f9..b0eda5a34065a 100644 --- a/build-conventions/build.gradle +++ b/build-conventions/build.gradle @@ -12,9 +12,6 @@ import org.gradle.plugins.ide.eclipse.model.SourceFolder buildscript { repositories { - maven { - url 'https://jitpack.io' - } mavenCentral() } } @@ -70,10 +67,6 @@ gradlePlugin { } repositories { - maven { - url 'https://jitpack.io' - } - - mavenCentral() gradlePluginPortal() } diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/GUtils.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/GUtils.java index 9a35aa41ba1e5..0b04496866ca9 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/GUtils.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/GUtils.java @@ -16,4 +16,12 @@ public abstract class GUtils { public static String capitalize(String s) { return s.substring(0, 1).toUpperCase(Locale.ROOT) + s.substring(1); } + + public static <T> T elvis(T given, T fallback) { + if (given == null) { + return fallback; + } else { + return given; + } + } } diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/PublishPlugin.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/PublishPlugin.java index cd13743ee0746..c3124812e5089 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/PublishPlugin.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/PublishPlugin.java @@ -9,12 +9,14 @@ package org.elasticsearch.gradle.internal.conventions; -import 
org.elasticsearch.gradle.internal.conventions.precommit.PomValidationPrecommitPlugin; +import groovy.util.Node; + import com.github.jengelman.gradle.plugins.shadow.ShadowExtension; import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin; -import groovy.util.Node; -import org.elasticsearch.gradle.internal.conventions.util.Util; + import org.elasticsearch.gradle.internal.conventions.info.GitInfo; +import org.elasticsearch.gradle.internal.conventions.precommit.PomValidationPrecommitPlugin; +import org.elasticsearch.gradle.internal.conventions.util.Util; import org.gradle.api.NamedDomainObjectSet; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -35,11 +37,12 @@ import org.gradle.api.tasks.bundling.Jar; import org.gradle.initialization.layout.BuildLayout; import org.gradle.language.base.plugins.LifecycleBasePlugin; +import org.w3c.dom.Element; -import javax.inject.Inject; import java.io.File; import java.util.Map; import java.util.concurrent.Callable; +import javax.inject.Inject; public class PublishPlugin implements Plugin<Project> { @@ -64,6 +67,7 @@ public void apply(Project project) { configureSourcesJar(project); configurePomGeneration(project); configurePublications(project); + formatGeneratedPom(project); } private void configurePublications(Project project) { @@ -113,29 +117,32 @@ private void configurePomGeneration(Project project) { var archivesBaseName = providerFactory.provider(() -> getArchivesBaseName(extensions)); var projectVersion = providerFactory.provider(() -> project.getVersion()); var generateMavenPoms = project.getTasks().withType(GenerateMavenPom.class); - generateMavenPoms.configureEach( - pomTask -> pomTask.setDestination( + generateMavenPoms.configureEach(pomTask -> { + pomTask.setDestination( (Callable<String>) () -> String.format( "%s/distributions/%s-%s.pom", projectLayout.getBuildDirectory().get().getAsFile().getPath(), archivesBaseName.get(), projectVersion.get() ) - ) - ); + ); + }); + var publishing = extensions.getByType(PublishingExtension.class); final var mavenPublications = publishing.getPublications().withType(MavenPublication.class); - addNameAndDescriptiontoPom(project, mavenPublications); + addNameAndDescriptionToPom(project, mavenPublications); mavenPublications.configureEach(publication -> { - // Add git origin info to generated POM files for internal builds - publication.getPom().withXml(xml -> addScmInfo(xml, gitInfo.get())); + publication.getPom().withXml(xml -> { + // Add git origin info to generated POM files for internal builds + addScmInfo(xml, gitInfo.get()); + }); // have to defer this until archivesBaseName is set project.afterEvaluate(p -> publication.setArtifactId(archivesBaseName.get())); generatePomTask.configure(t -> t.dependsOn(generateMavenPoms)); }); } - private void addNameAndDescriptiontoPom(Project project, NamedDomainObjectSet<MavenPublication> mavenPublications) { + private void addNameAndDescriptionToPom(Project project, NamedDomainObjectSet<MavenPublication> mavenPublications) { var name = project.getName(); var description = providerFactory.provider(() -> project.getDescription() != null ? project.getDescription() : ""); mavenPublications.configureEach(p -> p.getPom().withXml(xml -> { @@ -186,4 +193,32 @@ static void configureSourcesJar(Project project) { project.getTasks().named(BasePlugin.ASSEMBLE_TASK_NAME).configure(t -> t.dependsOn(sourcesJarTask)); }); } + + + /** + * Format the generated pom files to be in a sort of reproducible order. 
+ */ + private void formatGeneratedPom(Project project) { + var publishing = project.getExtensions().getByType(PublishingExtension.class); + final var mavenPublications = publishing.getPublications().withType(MavenPublication.class); + mavenPublications.configureEach(publication -> { + publication.getPom().withXml(xml -> { + // Add some pom formatting + formatDependencies(xml); + }); + }); + } + + /** + * Just ensure we put dependencies at the end; more a cosmetic thing than anything else. + */ + private void formatDependencies(XmlProvider xml) { + Element rootElement = xml.asElement(); + var dependencies = rootElement.getElementsByTagName("dependencies"); + if (dependencies.getLength() == 1 && dependencies.item(0) != null) { + org.w3c.dom.Node item = dependencies.item(0); + rootElement.removeChild(item); + rootElement.appendChild(item); + } + } } diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java index ea9009172c7e2..41c0b4d67e1df 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java @@ -17,6 +17,8 @@ import org.gradle.api.Project; import java.io.File; +import java.util.Arrays; +import java.util.Map; /** * This plugin configures formatting for Java source using Spotless @@ -64,7 +66,8 @@ public void apply(Project project) { java.importOrderFile(new File(elasticsearchWorkspace, importOrderPath)); // Most formatting is done through the Eclipse formatter - java.eclipse().configFile(new File(elasticsearchWorkspace, formatterConfigPath)); + java.eclipse().withP2Mirrors(Map.of("https://download.eclipse.org/", "https://mirror.umd.edu/eclipse/")) + .configFile(new File(elasticsearchWorkspace, formatterConfigPath)); // Ensure blank lines are actually empty. 
Since formatters are applied in // order, apply this one last, otherwise non-empty blank lines can creep diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PomValidationTask.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PomValidationTask.java index 9d06e632ec928..89bab313a0069 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PomValidationTask.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PomValidationTask.java @@ -16,6 +16,8 @@ import org.gradle.api.file.RegularFileProperty; import org.gradle.api.model.ObjectFactory; import org.gradle.api.tasks.InputFile; +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.TaskAction; import java.io.FileReader; @@ -37,6 +39,7 @@ public PomValidationTask(ObjectFactory objects) { } @InputFile + @PathSensitive(PathSensitivity.RELATIVE) public RegularFileProperty getPomFile() { return pomFile; } diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle index 38d3c0cd326f9..f2a02645f8c09 100644 --- a/build-tools-internal/build.gradle +++ b/build-tools-internal/build.gradle @@ -258,9 +258,6 @@ tasks.named('licenseHeaders').configure { *****************************************************************************/ repositories { - maven { - url 'https://jitpack.io' - } mavenCentral() gradlePluginPortal() } @@ -386,10 +383,13 @@ tasks.named("jar") { spotless { java { - // IDEs can sometimes run annotation processors that leave files in - // here, causing Spotless to complain. Even though this path ought not - // to exist, exclude it anyway in order to avoid spurious failures. 
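As an aside, the formatting configured in this hunk can be checked locally. Assuming Spotless's standard task names (not confirmed by this diff), run from the `build-tools-internal` directory:

```bash
./gradlew spotlessJavaCheck   # report formatting violations without modifying sources
./gradlew spotlessApply       # rewrite sources in place
```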
- toggleOffOn() + + // workaround for https://github.com/diffplug/spotless/issues/2317 + //toggleOffOn() + target project.fileTree("src/main/java") { + include '**/*.java' + exclude '**/DockerBase.java' + } } } diff --git a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties index 6acc1431eaec1..22286c90de3d1 100644 --- a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties +++ b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=2ab88d6de2c23e6adae7363ae6e29cbdd2a709e992929b48b6530fd0c7133bd6 -distributionUrl=https\://services.gradle.org/distributions/gradle-8.10.2-all.zip +distributionSha256Sum=89d4e70e4e84e2d2dfbb63e4daa53e21b25017cc70c37e4eea31ee51fb15098a +distributionUrl=https\://services.gradle.org/distributions/gradle-8.11.1-all.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/build-tools-internal/settings.gradle b/build-tools-internal/settings.gradle index 1b4fb1215a59d..8c88d36046768 100644 --- a/build-tools-internal/settings.gradle +++ b/build-tools-internal/settings.gradle @@ -1,8 +1,5 @@ pluginManagement { repositories { - maven { - url 'https://jitpack.io' - } mavenCentral() gradlePluginPortal() } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/BuildPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/BuildPluginFuncTest.groovy index 03b044583add0..63bb732d8a11d 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/BuildPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/BuildPluginFuncTest.groovy @@ -119,7 +119,7 @@ class BuildPluginFuncTest extends AbstractGradleFuncTest { noticeFile.set(file("NOTICE")) """ when: - def result = gradleRunner("assemble", "-x", "generateHistoricalFeaturesMetadata").build() + def result = gradleRunner("assemble", "-x", "generateClusterFeaturesMetadata").build() then: result.task(":assemble").outcome == TaskOutcome.SUCCESS file("build/distributions/hello-world.jar").exists() diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavaPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavaPluginFuncTest.groovy index 9fc6aa7276b2d..36a43c4b739b6 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavaPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavaPluginFuncTest.groovy @@ -20,9 +20,6 @@ class ElasticsearchJavaPluginFuncTest extends AbstractGradleInternalPluginFuncTe when: buildFile.text << """ import org.elasticsearch.gradle.Architecture - import org.elasticsearch.gradle.internal.info.BuildParams - BuildParams.init { it.setMinimumRuntimeVersion(JavaVersion.VERSION_1_10) } - assert tasks.named('compileJava').get().sourceCompatibility == JavaVersion.VERSION_1_10.toString() assert tasks.named('compileJava').get().targetCompatibility == JavaVersion.VERSION_1_10.toString() """ diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavadocPluginFuncTest.groovy 
b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavadocPluginFuncTest.groovy index b853fdef6a13f..34fa73ce502ac 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavadocPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/ElasticsearchJavadocPluginFuncTest.groovy @@ -73,7 +73,7 @@ class ElasticsearchJavadocPluginFuncTest extends AbstractGradleFuncTest { buildFile << """ plugins { id 'elasticsearch.java-doc' - id 'com.github.johnrengelman.shadow' version '7.1.2' + id 'com.gradleup.shadow' id 'java' } group = 'org.acme.depending' diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy index 87f4bbee05780..6d080e1c80763 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy @@ -9,16 +9,10 @@ package org.elasticsearch.gradle.internal -import org.elasticsearch.gradle.Architecture import org.elasticsearch.gradle.fixtures.AbstractGitAwareGradleFuncTest import org.gradle.testkit.runner.TaskOutcome -import spock.lang.IgnoreIf import spock.lang.Unroll -/* - * Test is ignored on ARM since this test case tests the ability to build certain older BWC branches that we don't support on ARM - */ -@IgnoreIf({ Architecture.current() == Architecture.AARCH64 }) class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleFuncTest { def setup() { diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy index 94df02b280ca6..a4635a7232754 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy @@ -9,7 +9,6 @@ package org.elasticsearch.gradle.internal - import spock.lang.Unroll import com.github.tomakehurst.wiremock.WireMockServer @@ -24,8 +23,7 @@ import java.nio.file.Paths import java.util.regex.Matcher import java.util.regex.Pattern -import static org.elasticsearch.gradle.internal.JdkDownloadPlugin.VENDOR_ADOPTIUM -import static org.elasticsearch.gradle.internal.JdkDownloadPlugin.VENDOR_OPENJDK +import static org.elasticsearch.gradle.internal.JdkDownloadPlugin.* class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { @@ -33,13 +31,11 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { private static final String ADOPT_JDK_VERSION = "12.0.2+10" private static final String ADOPT_JDK_VERSION_11 = "11.0.10+9" private static final String ADOPT_JDK_VERSION_15 = "15.0.2+7" + private static final String AZUL_JDK_VERSION_8 = "8u302+b08" + private static final String AZUL_8_DISTRO_VERSION = "8.56.0.23" private static final String OPEN_JDK_VERSION = "12.0.1+99@123456789123456789123456789abcde" private static final Pattern JDK_HOME_LOGLINE = Pattern.compile("JDK HOME: (.*)") - def setup() { - configurationCacheCompatible = false - } - @Unroll def "jdk #jdkVendor for 
#platform#suffix are downloaded and extracted"() { given: @@ -56,14 +52,16 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { version = '$jdkVersion' platform = "$platform" architecture = '$arch' + distributionVersion = '$distributionVersion' } } - def theJdks = jdks +// def theJdks = jdks tasks.register("getJdk") { dependsOn jdks.myJdk + def jdk = jdks.myJdk doLast { - println "JDK HOME: " + theJdks.myJdk + println "JDK HOME: " + jdk } } """ @@ -78,22 +76,23 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { assertExtraction(result.output, expectedJavaBin); where: - platform | arch | jdkVendor | jdkVersion | expectedJavaBin | suffix - "linux" | "x64" | VENDOR_ADOPTIUM | ADOPT_JDK_VERSION | "bin/java" | "" - "linux" | "x64" | VENDOR_OPENJDK | OPEN_JDK_VERSION | "bin/java" | "" - "linux" | "x64" | VENDOR_OPENJDK | OPENJDK_VERSION_OLD | "bin/java" | "(old version)" - "windows" | "x64" | VENDOR_ADOPTIUM | ADOPT_JDK_VERSION | "bin/java" | "" - "windows" | "x64" | VENDOR_OPENJDK | OPEN_JDK_VERSION | "bin/java" | "" - "windows" | "x64" | VENDOR_OPENJDK | OPENJDK_VERSION_OLD | "bin/java" | "(old version)" - "darwin" | "x64" | VENDOR_ADOPTIUM | ADOPT_JDK_VERSION | "Contents/Home/bin/java" | "" - "darwin" | "x64" | VENDOR_OPENJDK | OPEN_JDK_VERSION | "Contents/Home/bin/java" | "" - "darwin" | "x64" | VENDOR_OPENJDK | OPENJDK_VERSION_OLD | "Contents/Home/bin/java" | "(old version)" - "mac" | "x64" | VENDOR_OPENJDK | OPEN_JDK_VERSION | "Contents/Home/bin/java" | "" - "mac" | "x64" | VENDOR_OPENJDK | OPENJDK_VERSION_OLD | "Contents/Home/bin/java" | "(old version)" - "darwin" | "aarch64" | VENDOR_ADOPTIUM | ADOPT_JDK_VERSION | "Contents/Home/bin/java" | "" - "linux" | "aarch64" | VENDOR_ADOPTIUM | ADOPT_JDK_VERSION | "bin/java" | "" - "linux" | "aarch64" | VENDOR_ADOPTIUM | ADOPT_JDK_VERSION_11 | "bin/java" | "(jdk 11)" - "linux" | "aarch64" | VENDOR_ADOPTIUM | ADOPT_JDK_VERSION_15 | "bin/java" | "(jdk 15)" + platform | arch | jdkVendor | jdkVersion | distributionVersion | expectedJavaBin | suffix + "linux" | "x64" | VENDOR_ADOPTIUM | ADOPT_JDK_VERSION | null | "bin/java" | "" + "linux" | "x64" | VENDOR_OPENJDK | OPEN_JDK_VERSION | null | "bin/java" | "" + "linux" | "x64" | VENDOR_OPENJDK | OPENJDK_VERSION_OLD | null | "bin/java" | "(old version)" + "windows" | "x64" | VENDOR_ADOPTIUM | ADOPT_JDK_VERSION | null | "bin/java" | "" + "windows" | "x64" | VENDOR_OPENJDK | OPEN_JDK_VERSION | null | "bin/java" | "" + "windows" | "x64" | VENDOR_OPENJDK | OPENJDK_VERSION_OLD | null | "bin/java" | "(old version)" + "darwin" | "x64" | VENDOR_ADOPTIUM | ADOPT_JDK_VERSION | null | "Contents/Home/bin/java" | "" + "darwin" | "x64" | VENDOR_OPENJDK | OPEN_JDK_VERSION | null | "Contents/Home/bin/java" | "" + "darwin" | "x64" | VENDOR_OPENJDK | OPENJDK_VERSION_OLD | null | "Contents/Home/bin/java" | "(old version)" + "mac" | "x64" | VENDOR_OPENJDK | OPEN_JDK_VERSION | null | "Contents/Home/bin/java" | "" + "mac" | "x64" | VENDOR_OPENJDK | OPENJDK_VERSION_OLD | null | "Contents/Home/bin/java" | "(old version)" + "darwin" | "aarch64" | VENDOR_ADOPTIUM | ADOPT_JDK_VERSION | null | "Contents/Home/bin/java" | "" + "linux" | "aarch64" | VENDOR_ADOPTIUM | ADOPT_JDK_VERSION | null | "bin/java" | "" + "linux" | "aarch64" | VENDOR_ADOPTIUM | ADOPT_JDK_VERSION_11 | null | "bin/java" | "(jdk 11)" + "linux" | "aarch64" | VENDOR_ADOPTIUM | ADOPT_JDK_VERSION_15 | null | "bin/java" | "(jdk 15)" + "darwin" | "aarch64" | VENDOR_ZULU | AZUL_JDK_VERSION_8 | AZUL_8_DISTRO_VERSION | "Contents/Home/bin/java" 
| "(jdk 8)" } def "transforms are reused across projects"() { @@ -118,9 +117,10 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { } } tasks.register("getJdk") { - dependsOn jdks.myJdk + def jdk = jdks.myJdk + dependsOn jdk doLast { - println "JDK HOME: " + jdks.myJdk + println "JDK HOME: " + jdk } } """ @@ -137,7 +137,7 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { result.output.count("Unpacking linux-12.0.2-x64.tar.gz using ${SymbolicLinkPreservingUntarTransform.simpleName}") == 1 where: - platform | jdkVendor | jdkVersion | expectedJavaBin + platform | jdkVendor | jdkVersion | expectedJavaBin "linux" | VENDOR_ADOPTIUM | ADOPT_JDK_VERSION | "bin/java" } @@ -159,6 +159,7 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { vendor = '$VENDOR_ADOPTIUM' version = '$ADOPT_JDK_VERSION' platform = "$platform" + distributionVersion = '$ADOPT_JDK_VERSION' architecture = "x64" } } @@ -204,6 +205,8 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { assert matcher.find() == true; String jdkHome = matcher.group(1); Path javaPath = Paths.get(jdkHome, javaBin); + println "canonical " + javaPath.toFile().getCanonicalPath() + Paths.get(jdkHome).toFile().listFiles().each { println it } assert Files.exists(javaPath) == true; true } @@ -221,15 +224,26 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { final String versionPath = isOld ? "jdk1/99" : "jdk12.0.1/123456789123456789123456789abcde/99"; final String filename = "openjdk-" + (isOld ? "1" : "12.0.1") + "_" + effectivePlatform + "-x64_bin." + extension(platform); return "/java/GA/" + versionPath + "/GPL/" + filename; + } else if (vendor.equals(VENDOR_ZULU)) { + // we only have a single version of zulu currently in the tests + return "/zulu/bin/zulu8.56.0.23-ca-jdk8.0.302-macosx_aarch64.tar.gz" } } private static byte[] filebytes(final String vendor, final String platform) throws IOException { final String effectivePlatform = getPlatform(vendor, platform); if (vendor.equals(VENDOR_ADOPTIUM)) { - return JdkDownloadPluginFuncTest.class.getResourceAsStream("fake_adoptium_" + effectivePlatform + "." + extension(platform)).getBytes() + return JdkDownloadPluginFuncTest.class.getResourceAsStream( + "fake_adoptium_" + effectivePlatform + "." + extension(platform) + ).getBytes() } else if (vendor.equals(VENDOR_OPENJDK)) { - JdkDownloadPluginFuncTest.class.getResourceAsStream("fake_openjdk_" + effectivePlatform + "." + extension(platform)).getBytes() + return JdkDownloadPluginFuncTest.class.getResourceAsStream( + "fake_openjdk_" + effectivePlatform + "." + extension(platform) + ).getBytes() + } else { + // zulu + String resourcePath = "fake_zulu_" + effectivePlatform + "." 
+ extension(platform) + return JdkDownloadPluginFuncTest.class.getResourceAsStream(resourcePath).getBytes() } } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy index 6e403c85a23f4..a199ff9d3eac5 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy @@ -96,7 +96,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { plugins { id 'elasticsearch.java' id 'elasticsearch.publish' - id 'com.github.johnrengelman.shadow' + id 'com.gradleup.shadow' } repositories { @@ -117,7 +117,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { } version = "1.0" group = 'org.acme' - description = 'some description' + description = 'shadowed project' """ when: @@ -137,7 +137,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { hello-world 1.0 hello-world - some description + shadowed project unknown unknown @@ -186,7 +186,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { plugins { id 'elasticsearch.java' id 'elasticsearch.publish' - id 'com.github.johnrengelman.shadow' + id 'com.gradleup.shadow' } dependencies { @@ -206,7 +206,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { group = 'org.acme' } - description = 'some description' + description = 'with shadowed dependencies' """ when: @@ -226,7 +226,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { hello-world 1.0 hello-world - some description + with shadowed dependencies unknown unknown @@ -277,13 +277,13 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { plugins { id 'elasticsearch.internal-es-plugin' id 'elasticsearch.publish' - id 'com.github.johnrengelman.shadow' + id 'com.gradleup.shadow' } esplugin { name = 'hello-world-plugin' classname 'org.acme.HelloWorldPlugin' - description = "custom project description" + description = "shadowed es plugin" } publishing { @@ -303,7 +303,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { """ when: - def result = gradleRunner('assemble', '--stacktrace', '-x', 'generateHistoricalFeaturesMetadata').build() + def result = gradleRunner('assemble', '--stacktrace', '-x', 'generateClusterFeaturesMetadata').build() then: result.task(":generatePom").outcome == TaskOutcome.SUCCESS @@ -324,7 +324,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { hello-world-plugin 1.0 hello-world - custom project description + shadowed es plugin unknown unknown @@ -353,7 +353,6 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { https://www.elastic.co - """ ) } @@ -440,8 +439,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { // scm info only added for internal builds internalBuild() buildFile << """ - BuildParams.init { it.setGitOrigin("https://some-repo.com/repo.git") } - + buildParams.getGitOriginProperty().set("https://some-repo.com/repo.git") apply plugin:'elasticsearch.java' apply plugin:'elasticsearch.publish' diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/snyk/SnykDependencyMonitoringGradlePluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/snyk/SnykDependencyMonitoringGradlePluginFuncTest.groovy index 354100a9b82c5..725f117d17e64 100644 --- 
a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/snyk/SnykDependencyMonitoringGradlePluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/snyk/SnykDependencyMonitoringGradlePluginFuncTest.groovy @@ -161,7 +161,7 @@ class SnykDependencyMonitoringGradlePluginFuncTest extends AbstractGradleInterna }, "target": { "remoteUrl": "http://acme.org", - "branch": "unknown" + "branch": "$version" }, "targetReference": "$version", "projectAttributes": { diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy index 97f03d9821117..ce5c1519fe11f 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy @@ -10,6 +10,7 @@ package org.elasticsearch.gradle.internal.test.rest import spock.lang.IgnoreIf +import spock.lang.IgnoreRest import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.fixtures.AbstractRestResourcesFuncTest @@ -20,16 +21,16 @@ import org.gradle.testkit.runner.TaskOutcome class LegacyYamlRestTestPluginFuncTest extends AbstractRestResourcesFuncTest { def setup() { + configurationCacheCompatible = true buildApiRestrictionsDisabled = true } def "yamlRestTest does nothing when there are no tests"() { given: + internalBuild() buildFile << """ - plugins { - id 'elasticsearch.legacy-yaml-rest-test' - } + apply plugin: 'elasticsearch.legacy-yaml-rest-test' """ when: @@ -136,7 +137,7 @@ class LegacyYamlRestTestPluginFuncTest extends AbstractRestResourcesFuncTest { """ when: - def result = gradleRunner("yamlRestTest", "--console", 'plain', '--stacktrace').buildAndFail() + def result = gradleRunner("yamlRestTest", "--console", 'plain').buildAndFail() then: result.task(":distribution:archives:integ-test-zip:buildExpanded").outcome == TaskOutcome.SUCCESS diff --git a/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_zulu_macos.tar.gz b/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_zulu_macos.tar.gz new file mode 100644 index 0000000000000..87361b67ec76c Binary files /dev/null and b/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_zulu_macos.tar.gz differ diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle index 592e6af41ab00..847eda7a355c0 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle @@ -12,11 +12,14 @@ import java.time.LocalDateTime; import org.elasticsearch.gradle.Architecture import org.elasticsearch.gradle.OS -import org.elasticsearch.gradle.internal.info.BuildParams +import static org.elasticsearch.gradle.internal.util.CiUtils.safeName import java.lang.management.ManagementFactory import java.time.LocalDateTime +// Resolving this early to avoid issues with the build scan plugin in combination with the configuration cache usage +def taskNames = gradle.startParameter.taskNames.join(' ') + develocity { buildScan { @@ -34,12 +37,15 @@ develocity { publishing.onlyIf { false 
} } + def fips = buildParams.inFipsJvm + def gitRevision = buildParams.gitRevision + background { tag OS.current().name() tag Architecture.current().name() // Tag if this build is run in FIPS mode - if (BuildParams.inFipsJvm) { + if (fips) { tag 'FIPS' } @@ -92,8 +98,8 @@ develocity { link 'Source', "${prBaseUrl}/tree/${System.getenv('BUILDKITE_COMMIT')}" link 'Pull Request', "https://github.com/${repository}/pull/${prId}" } else { - value 'Git Commit ID', BuildParams.gitRevision - link 'Source', "https://github.com/${repository}/tree/${BuildParams.gitRevision}" + value 'Git Commit ID', gitRevision + link 'Source', "https://github.com/${repository}/tree/${gitRevision}" } buildFinished { result -> @@ -108,7 +114,7 @@ develocity { // Add a build annotation // See: https://buildkite.com/docs/agent/v3/cli-annotate - def body = """
-  ${System.getenv('BUILDKITE_LABEL')} :gradle: ${result.failures ? 'failed' : 'successful'} build: gradle ${gradle.startParameter.taskNames.join(' ')}
-  """
+  def body = """
+  ${System.getenv('BUILDKITE_LABEL')} :gradle: ${result.failures ? 'failed' : 'successful'} build: gradle ${taskNames}
""" def process = [ 'buildkite-agent', 'annotate', @@ -129,7 +135,3 @@ develocity { } } } - -static def safeName(String string) { - return string.replaceAll(/[^a-zA-Z0-9_\-\.]+/, ' ').trim().replaceAll(' ', '_').toLowerCase() -} diff --git a/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle b/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle index 9a988292b5b8c..77e509ea97870 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle @@ -9,7 +9,6 @@ import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.ElasticsearchTestBasePlugin -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.rest.InternalJavaRestTestPlugin import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask @@ -19,7 +18,7 @@ ext.bwcTaskName = { Version version -> def bwcTestSnapshots = tasks.register("bwcTestSnapshots") { if (project.bwc_tests_enabled) { - dependsOn tasks.matching { task -> BuildParams.bwcVersions.unreleased.any { version -> bwcTaskName(version) == task.name } } + dependsOn tasks.matching { task -> buildParams.bwcVersions.unreleased.any { version -> bwcTaskName(version) == task.name } } } } diff --git a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle index 3bff30d9511fb..14e2323b4d14d 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle @@ -9,27 +9,26 @@ import org.elasticsearch.gradle.internal.ExportElasticsearchBuildResourcesTask -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.rest.RestTestBasePlugin import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask import org.elasticsearch.gradle.testclusters.TestClustersAware -import org.elasticsearch.gradle.testclusters.TestDistribution -// Common config when running with a FIPS-140 runtime JVM -if (BuildParams.inFipsJvm) { +//apply plugin: org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin +// Common config when running with a FIPS-140 runtime JVM +if (buildParams.inFipsJvm) { allprojects { - String javaSecurityFilename = BuildParams.runtimeJavaDetails.toLowerCase().contains('oracle') ? 'fips_java_oracle.security' : 'fips_java.security' + String javaSecurityFilename = buildParams.runtimeJavaDetails.get().toLowerCase().contains('oracle') ? 
'fips_java_oracle.security' : 'fips_java.security' File fipsResourcesDir = new File(project.buildDir, 'fips-resources') File fipsSecurity = new File(fipsResourcesDir, javaSecurityFilename) File fipsPolicy = new File(fipsResourcesDir, 'fips_java.policy') File fipsTrustStore = new File(fipsResourcesDir, 'cacerts.bcfks') - def bcFips = dependencies.create('org.bouncycastle:bc-fips:1.0.2.4') - def bcTlsFips = dependencies.create('org.bouncycastle:bctls-fips:1.0.17') + def bcFips = dependencies.create('org.bouncycastle:bc-fips:1.0.2.5') + def bcTlsFips = dependencies.create('org.bouncycastle:bctls-fips:1.0.19') def manualDebug = false; //change this to manually debug bouncy castle in an IDE if(manualDebug) { - bcFips = dependencies.create('org.bouncycastle:bc-fips-debug:1.0.2.4') - bcTlsFips = dependencies.create('org.bouncycastle:bctls-fips:1.0.17'){ + bcFips = dependencies.create('org.bouncycastle:bc-fips-debug:1.0.2.5') + bcTlsFips = dependencies.create('org.bouncycastle:bctls-fips:1.0.19'){ exclude group: 'org.bouncycastle', module: 'bc-fips' // to avoid jar hell } } diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle index 63a3cb6d86d68..895cca2af7967 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle @@ -9,7 +9,6 @@ import org.elasticsearch.gradle.util.Pair import org.elasticsearch.gradle.util.GradleUtils -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.TestUtil import org.jetbrains.gradle.ext.JUnit @@ -143,13 +142,18 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { description = 'Enables preview features on native library module' dependsOn tasks.named("enableExternalConfiguration") - doLast { - ['main', 'test'].each { sourceSet -> - modifyXml(".idea/modules/libs/native/elasticsearch.libs.${project.project(':libs:native').name}.${sourceSet}.iml") { xml -> - xml.component.find { it.'@name' == 'NewModuleRootManager' }?.'@LANGUAGE_LEVEL' = 'JDK_21_PREVIEW' + ext { + enablePreview = { moduleFile, languageLevel -> + modifyXml(moduleFile) { xml -> + xml.component.find { it.'@name' == 'NewModuleRootManager' }?.'@LANGUAGE_LEVEL' = languageLevel } } } + + doLast { + enablePreview('.idea/modules/libs/native/elasticsearch.libs.native.main.iml', 'JDK_21_PREVIEW') + enablePreview('.idea/modules/libs/native/elasticsearch.libs.native.test.iml', 'JDK_21_PREVIEW') + } } tasks.register('buildDependencyArtifacts') { @@ -171,7 +175,7 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { idea { project { vcs = 'Git' - jdkName = BuildParams.minimumCompilerVersion.majorVersion + jdkName = buildParams.minimumCompilerVersion.majorVersion settings { delegateActions { diff --git a/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle b/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle index aacc86e764d51..224e6bd4c50d1 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle @@ -10,7 +10,6 @@ import org.elasticsearch.gradle.Architecture import org.elasticsearch.gradle.OS import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.precommit.ThirdPartyAuditPrecommitPlugin import 
org.elasticsearch.gradle.internal.precommit.ThirdPartyAuditTask import org.elasticsearch.gradle.internal.test.rest.RestTestBasePlugin @@ -27,8 +26,8 @@ configure(allprojects) { JvmVendorSpec.matching(VersionProperties.bundledJdkVendor) } project.tasks.withType(Test).configureEach { Test test -> - if (BuildParams.getIsRuntimeJavaHomeSet()) { - test.executable = "${BuildParams.runtimeJavaHome}/bin/java" + + if (buildParams.getIsRuntimeJavaHomeSet()) { + test.executable = "${buildParams.runtimeJavaHome.get()}/bin/java" + (OS.current() == OS.WINDOWS ? '.exe' : '') } else { test.javaLauncher = javaToolchains.launcherFor { @@ -41,7 +40,7 @@ configure(allprojects) { } project.plugins.withId("elasticsearch.testclusters") { testClustersPlugin -> project.plugins.withId("elasticsearch.internal-testclusters") { internalPlugin -> - if (BuildParams.getIsRuntimeJavaHomeSet() == false) { + if (buildParams.getIsRuntimeJavaHomeSet() == false) { // If no runtime java home is set, use the bundled JDK for test clusters testClustersPlugin.setRuntimeJava(launcher.map { it.metadata.installationPath.asFile }) } diff --git a/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle b/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle index 1fab4d035177a..3f506ae954df8 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle @@ -3,7 +3,6 @@ import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.internal.BwcVersions import org.elasticsearch.gradle.internal.JarApiComparisonTask -import org.elasticsearch.gradle.internal.info.BuildParams import static org.elasticsearch.gradle.internal.InternalDistributionBwcSetupPlugin.buildBwcTaskName @@ -17,11 +16,11 @@ dependencies { newJar project(":libs:${project.name}") } -BuildParams.bwcVersions.withIndexCompatible({ it.onOrAfter(Version.fromString(ext.stableApiSince)) +buildParams.bwcVersions.withIndexCompatible({ it.onOrAfter(Version.fromString(ext.stableApiSince)) && it != VersionProperties.elasticsearchVersion }) { bwcVersion, baseName -> - BwcVersions.UnreleasedVersionInfo unreleasedVersion = BuildParams.bwcVersions.unreleasedInfo(bwcVersion) + BwcVersions.UnreleasedVersionInfo unreleasedVersion = buildParams.bwcVersions.unreleasedInfo(bwcVersion) configurations { "oldJar${baseName}" { diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/AntFixtureStop.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/AntFixtureStop.groovy index ad37fa9f02c8c..6c87149095186 100644 --- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/AntFixtureStop.groovy +++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/AntFixtureStop.groovy @@ -15,16 +15,12 @@ import org.elasticsearch.gradle.internal.test.AntFixture import org.gradle.api.file.FileSystemOperations import org.gradle.api.file.ProjectLayout import org.gradle.api.provider.ProviderFactory -import org.gradle.api.tasks.Internal import org.gradle.process.ExecOperations import javax.inject.Inject abstract class AntFixtureStop extends LoggedExec implements FixtureStop { - @Internal - AntFixture fixture - @Inject AntFixtureStop(ProjectLayout projectLayout, ExecOperations execOperations, @@ -34,12 +30,12 @@ abstract class AntFixtureStop extends LoggedExec implements FixtureStop { } void setFixture(AntFixture fixture) { - assert this.fixture == null - 
this.fixture = fixture; - final Object pid = "${-> this.fixture.pid}" - onlyIf("pidFile exists") { fixture.pidFile.exists() } + def pidFile = fixture.pidFile + def fixtureName = fixture.name + final Object pid = "${-> Integer.parseInt(pidFile.getText('UTF-8').trim())}" + onlyIf("pidFile exists") { pidFile.exists() } doFirst { - logger.info("Shutting down ${fixture.name} with pid ${pid}") + logger.info("Shutting down ${fixtureName} with pid ${pid}") } if (OS.current() == OS.WINDOWS) { @@ -51,9 +47,8 @@ abstract class AntFixtureStop extends LoggedExec implements FixtureStop { } doLast { fileSystemOperations.delete { - it.delete(fixture.pidFile) + it.delete(pidFile) } } - this.fixture = fixture } } diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/AntTask.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/AntTask.groovy index 81f21f8c62d86..01a3bdaee2337 100644 --- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/AntTask.groovy +++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/AntTask.groovy @@ -29,11 +29,6 @@ import java.nio.charset.Charset */ public abstract class AntTask extends DefaultTask { - /** - * A buffer that will contain the output of the ant code run, - * if the output was not already written directly to stdout. - */ - public final ByteArrayOutputStream outputBuffer = new ByteArrayOutputStream() @Inject protected FileSystemOperations getFileSystemOperations() { @@ -57,6 +52,11 @@ public abstract class AntTask extends DefaultTask { // otherwise groovy replaces System.out, and you have no chance to debug // ant.saveStreams = false + /** + * A buffer that will contain the output of the ant code run, + * if the output was not already written directly to stdout. + */ + ByteArrayOutputStream outputBuffer = new ByteArrayOutputStream() final int outputLevel = logger.isDebugEnabled() ? Project.MSG_DEBUG : Project.MSG_INFO final PrintStream stream = useStdout() ? System.out : new PrintStream(outputBuffer, true, Charset.defaultCharset().name()) diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/test/AntFixture.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/test/AntFixture.groovy index f2837ff40fb79..88a68f1194858 100644 --- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/test/AntFixture.groovy +++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/test/AntFixture.groovy @@ -10,22 +10,37 @@ package org.elasticsearch.gradle.internal.test import org.elasticsearch.gradle.OS + import org.elasticsearch.gradle.internal.AntFixtureStop import org.elasticsearch.gradle.internal.AntTask +import org.elasticsearch.gradle.testclusters.TestClusterInfo +import org.elasticsearch.gradle.testclusters.TestClusterValueSource +import org.elasticsearch.gradle.testclusters.TestClustersRegistry import org.gradle.api.GradleException +import org.gradle.api.file.ProjectLayout +import org.gradle.api.provider.Property +import org.gradle.api.provider.Provider +import org.gradle.api.provider.ProviderFactory +import org.gradle.api.provider.ValueSource +import org.gradle.api.provider.ValueSourceParameters +import org.gradle.api.tasks.Input import org.gradle.api.tasks.Internal import org.gradle.api.tasks.TaskProvider +import javax.inject.Inject + /** * A fixture for integration tests which runs in a separate process launched by Ant. 
*/ -class AntFixture extends AntTask implements Fixture { +class AntFixture extends AntTask { /** The path to the executable that starts the fixture. */ @Internal String executable private final List arguments = new ArrayList<>() + private ProjectLayout projectLayout + private final ProviderFactory providerFactory void args(Object... args) { arguments.addAll(args) @@ -69,19 +84,14 @@ class AntFixture extends AntTask implements Fixture { return tmpFile.exists() } - private final TaskProvider stopTask - - AntFixture() { - stopTask = createStopTask() + @Inject + AntFixture(ProjectLayout projectLayout, ProviderFactory providerFactory) { + this.providerFactory = providerFactory + this.projectLayout = projectLayout; + TaskProvider stopTask = createStopTask() finalizedBy(stopTask) } - @Override - @Internal - TaskProvider getStopTask() { - return stopTask - } - @Override protected void runAnt(AntBuilder ant) { // reset everything @@ -231,7 +241,7 @@ class AntFixture extends AntTask implements Fixture { */ @Internal protected File getBaseDir() { - return new File(project.buildDir, "fixtures/${name}") + return new File(projectLayout.getBuildDirectory().getAsFile().get(), "fixtures/${name}") } /** Returns the working directory for the process. Defaults to "cwd" inside baseDir. */ @@ -242,7 +252,7 @@ class AntFixture extends AntTask implements Fixture { /** Returns the file the process writes its pid to. Defaults to "pid" inside baseDir. */ @Internal - protected File getPidFile() { + File getPidFile() { return new File(baseDir, 'pid') } @@ -264,6 +274,12 @@ class AntFixture extends AntTask implements Fixture { return portsFile.readLines("UTF-8").get(0) } + @Internal + Provider getAddressAndPortProvider() { + File thePortFile = portsFile + return providerFactory.provider(() -> thePortFile.readLines("UTF-8").get(0)) + } + /** Returns a file that wraps around the actual command when {@code spawn == true}. 
*/ @Internal protected File getWrapperScript() { @@ -281,4 +297,22 @@ class AntFixture extends AntTask implements Fixture { protected File getRunLog() { return new File(cwd, 'run.log') } + + @Internal + Provider getAddressAndPortSource() { + return providerFactory.of(AntFixtureValueSource.class, spec -> { + spec.getParameters().getPortFile().set(portsFile); + }); + } + + static abstract class AntFixtureValueSource implements ValueSource { + @Override + String obtain() { + return getParameters().getPortFile().map { it.readLines("UTF-8").get(0) }.get() + } + + interface Parameters extends ValueSourceParameters { + Property getPortFile(); + } + } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java index 04f031d4a5169..2b79bc2b9173e 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BaseInternalPluginBuildPlugin.java @@ -12,9 +12,9 @@ import groovy.lang.Closure; import org.elasticsearch.gradle.internal.conventions.util.Util; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; import org.elasticsearch.gradle.internal.precommit.JarHellPrecommitPlugin; -import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin; +import org.elasticsearch.gradle.internal.test.ClusterFeaturesMetadataPlugin; import org.elasticsearch.gradle.plugin.PluginBuildPlugin; import org.elasticsearch.gradle.plugin.PluginPropertiesExtension; import org.elasticsearch.gradle.testclusters.ElasticsearchCluster; @@ -38,7 +38,8 @@ public void apply(Project project) { project.getPluginManager().apply(PluginBuildPlugin.class); project.getPluginManager().apply(JarHellPrecommitPlugin.class); project.getPluginManager().apply(ElasticsearchJavaPlugin.class); - project.getPluginManager().apply(HistoricalFeaturesMetadataPlugin.class); + project.getPluginManager().apply(ClusterFeaturesMetadataPlugin.class); + boolean isCi = project.getRootProject().getExtensions().getByType(BuildParameterExtension.class).isCi(); // Clear default dependencies added by public PluginBuildPlugin as we add our // own project dependencies for internal builds // TODO remove once we removed default dependencies from PluginBuildPlugin @@ -54,7 +55,7 @@ public void apply(Project project) { .set("addQaCheckDependencies", new Closure(BaseInternalPluginBuildPlugin.this, BaseInternalPluginBuildPlugin.this) { public void doCall(Project proj) { // This is only a convenience for local developers so make this a noop when running in CI - if (BuildParams.isCi() == false) { + if (isCi == false) { proj.afterEvaluate(project1 -> { // let check depend on check tasks of qa sub-projects final var checkTaskProvider = project1.getTasks().named("check"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java index 75984e1bc6998..fb8a9858e24d5 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BuildPlugin.java @@ -12,7 +12,7 @@ import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import 
org.elasticsearch.gradle.internal.precommit.InternalPrecommitTasks; import org.elasticsearch.gradle.internal.snyk.SnykDependencyMonitoringGradlePlugin; -import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin; +import org.elasticsearch.gradle.internal.test.ClusterFeaturesMetadataPlugin; import org.gradle.api.InvalidUserDataException; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -63,7 +63,7 @@ public void apply(final Project project) { project.getPluginManager().apply(ElasticsearchJavadocPlugin.class); project.getPluginManager().apply(DependenciesInfoPlugin.class); project.getPluginManager().apply(SnykDependencyMonitoringGradlePlugin.class); - project.getPluginManager().apply(HistoricalFeaturesMetadataPlugin.class); + project.getPluginManager().apply(ClusterFeaturesMetadataPlugin.class); InternalPrecommitTasks.create(project, true); configureLicenseAndNotice(project); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java index 40d16bafbb26b..5992a40275b46 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java @@ -13,7 +13,6 @@ import org.elasticsearch.gradle.LoggedExec; import org.elasticsearch.gradle.OS; import org.elasticsearch.gradle.Version; -import org.elasticsearch.gradle.internal.info.BuildParams; import org.gradle.api.Action; import org.gradle.api.GradleException; import org.gradle.api.Project; @@ -47,6 +46,7 @@ public class BwcSetupExtension { private final ProviderFactory providerFactory; private final JavaToolchainService toolChainService; private final Provider unreleasedVersionInfo; + private final Boolean isCi; private Provider checkoutDir; @@ -56,7 +56,8 @@ public BwcSetupExtension( ProviderFactory providerFactory, JavaToolchainService toolChainService, Provider unreleasedVersionInfo, - Provider checkoutDir + Provider checkoutDir, + Boolean isCi ) { this.project = project; this.objectFactory = objectFactory; @@ -64,6 +65,7 @@ public BwcSetupExtension( this.toolChainService = toolChainService; this.unreleasedVersionInfo = unreleasedVersionInfo; this.checkoutDir = checkoutDir; + this.isCi = isCi; } TaskProvider bwcTask(String name, Action configuration) { @@ -80,7 +82,8 @@ TaskProvider bwcTask(String name, Action configuration, toolChainService, name, configuration, - useUniqueUserHome + useUniqueUserHome, + isCi ); } @@ -93,7 +96,8 @@ private static TaskProvider createRunBwcGradleTask( JavaToolchainService toolChainService, String name, Action configAction, - boolean useUniqueUserHome + boolean useUniqueUserHome, + boolean isCi ) { return project.getTasks().register(name, LoggedExec.class, loggedExec -> { loggedExec.dependsOn("checkoutBwcBranch"); @@ -104,16 +108,16 @@ private static TaskProvider createRunBwcGradleTask( spec.getParameters().getCheckoutDir().set(checkoutDir); }).flatMap(s -> getJavaHome(objectFactory, toolChainService, Integer.parseInt(s)))); - if (BuildParams.isCi() && OS.current() != OS.WINDOWS) { + if (isCi && OS.current() != OS.WINDOWS) { // TODO: Disabled for now until we can figure out why files are getting corrupted // loggedExec.getEnvironment().put("GRADLE_RO_DEP_CACHE", System.getProperty("user.home") + "/gradle_ro_cache"); } if (OS.current() == OS.WINDOWS) { loggedExec.getExecutable().set("cmd"); - loggedExec.args("/C", 
"call", new File(checkoutDir.get(), "gradlew").toString()); + loggedExec.args("/C", "call", "gradlew"); } else { - loggedExec.getExecutable().set(new File(checkoutDir.get(), "gradlew").toString()); + loggedExec.getExecutable().set("./gradlew"); } if (useUniqueUserHome) { @@ -173,7 +177,7 @@ private static String readFromFile(File file) { } } - public static abstract class JavaHomeValueSource implements ValueSource { + public abstract static class JavaHomeValueSource implements ValueSource { private String minimumCompilerVersionPath(Version bwcVersion) { return (bwcVersion.onOrAfter(BUILD_TOOL_MINIMUM_VERSION)) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java index f467a204c0348..93c2623a23d31 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java @@ -11,6 +11,7 @@ import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; +import java.io.Serializable; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; @@ -60,7 +61,8 @@ * We are then able to map the unreleased version to branches in git and Gradle projects that are capable of checking * out and building them, so we can include these in the testing plan as well. */ -public class BwcVersions { + +public class BwcVersions implements Serializable { private static final Pattern LINE_PATTERN = Pattern.compile( "\\W+public static final Version V_(\\d+)_(\\d+)_(\\d+)(_alpha\\d+|_beta\\d+|_rc\\d+)?.*\\);" @@ -68,7 +70,7 @@ public class BwcVersions { private static final String GLIBC_VERSION_ENV_VAR = "GLIBC_VERSION"; private final Version currentVersion; - private final List versions; + private final transient List versions; private final Map unreleased; public BwcVersions(List versionLines) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java index 3e0a47a8f453c..0fb75b59b6096 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java @@ -13,10 +13,8 @@ * This class models the different Docker base images that are used to build Docker distributions of Elasticsearch. 
*/ public enum DockerBase { - DEFAULT("ubuntu:20.04", "", "apt-get"), - // "latest" here is intentional, since the image name specifies "8" - UBI("docker.elastic.co/ubi8/ubi-minimal:latest", "-ubi", "microdnf"), + DEFAULT("docker.elastic.co/ubi8/ubi-minimal:latest", "", "microdnf"), // The Iron Bank base image is UBI (albeit hardened), but we are required to parameterize the Docker build IRON_BANK("${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG}", "-ironbank", "yum"), @@ -24,7 +22,7 @@ public enum DockerBase { // Chainguard based wolfi image with latest jdk // This is usually updated via renovatebot // spotless:off - WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:973431347ad45f40e01afbbd010bf9de929c088a63382239b90dd84f39618bc8", + WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:32f06b169bb4b0f257fbb10e8c8379f06d3ee1355c89b3327cb623781a29590e", "-wolfi", "apk" ), diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java index 7d9537feaea56..14baa55794c95 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java @@ -147,12 +147,17 @@ interface Parameters extends FlowParameters { @SuppressWarnings("checkstyle:DescendantToken") @Override public void execute(BuildFinishedFlowAction.Parameters parameters) throws FileNotFoundException { + List filesToArchive = parameters.getFilteredFiles().get(); + if (filesToArchive.isEmpty()) { + return; + } File uploadFile = parameters.getUploadFile().get(); if (uploadFile.exists()) { getFileSystemOperations().delete(spec -> spec.delete(uploadFile)); } uploadFile.getParentFile().mkdirs(); - createBuildArchiveTar(parameters.getFilteredFiles().get(), parameters.getProjectDir().get(), uploadFile); + + createBuildArchiveTar(filesToArchive, parameters.getProjectDir().get(), uploadFile); if (uploadFile.exists() && "true".equals(System.getenv("BUILDKITE"))) { String uploadFilePath = uploadFile.getName(); File uploadFileDir = uploadFile.getParentFile(); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java index 05b7af83aa8e4..c897b142da2fb 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java @@ -11,7 +11,7 @@ import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitTaskPlugin; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.internal.test.MutedTestPlugin; import org.elasticsearch.gradle.internal.test.TestUtil; @@ -49,6 +49,7 @@ public class ElasticsearchJavaBasePlugin implements Plugin { private final JavaToolchainService javaToolchains; + private BuildParameterExtension buildParams; @Inject ElasticsearchJavaBasePlugin(JavaToolchainService javaToolchains) { @@ -57,8 +58,10 @@ public class ElasticsearchJavaBasePlugin implements 
Plugin { @Override public void apply(Project project) { + project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); // make sure the global build info plugin is applied to the root project project.getRootProject().getPluginManager().apply(GlobalBuildInfoPlugin.class); + buildParams = project.getRootProject().getExtensions().getByType(BuildParameterExtension.class); project.getPluginManager().apply(JavaBasePlugin.class); // common repositories setup project.getPluginManager().apply(RepositoriesSetupPlugin.class); @@ -129,14 +132,14 @@ private static void disableTransitiveDependenciesForSourceSet(Project project, S public void configureCompile(Project project) { project.getExtensions().getExtraProperties().set("compactProfile", "full"); JavaPluginExtension java = project.getExtensions().getByType(JavaPluginExtension.class); - if (BuildParams.getJavaToolChainSpec().isPresent()) { - java.toolchain(BuildParams.getJavaToolChainSpec().get()); + if (buildParams.getJavaToolChainSpec().isPresent()) { + java.toolchain(buildParams.getJavaToolChainSpec().get()); } - java.setSourceCompatibility(BuildParams.getMinimumRuntimeVersion()); - java.setTargetCompatibility(BuildParams.getMinimumRuntimeVersion()); + java.setSourceCompatibility(buildParams.getMinimumRuntimeVersion()); + java.setTargetCompatibility(buildParams.getMinimumRuntimeVersion()); project.getTasks().withType(JavaCompile.class).configureEach(compileTask -> { compileTask.getJavaCompiler().set(javaToolchains.compilerFor(spec -> { - spec.getLanguageVersion().set(JavaLanguageVersion.of(BuildParams.getMinimumRuntimeVersion().getMajorVersion())); + spec.getLanguageVersion().set(JavaLanguageVersion.of(buildParams.getMinimumRuntimeVersion().getMajorVersion())); })); CompileOptions compileOptions = compileTask.getOptions(); @@ -159,7 +162,7 @@ public void configureCompile(Project project) { compileTask.getConventionMapping().map("sourceCompatibility", () -> java.getSourceCompatibility().toString()); compileTask.getConventionMapping().map("targetCompatibility", () -> java.getTargetCompatibility().toString()); compileOptions.getRelease().set(releaseVersionProviderFromCompileTask(project, compileTask)); - compileOptions.setIncremental(BuildParams.isCi() == false); + compileOptions.setIncremental(buildParams.isCi() == false); }); // also apply release flag to groovy, which is used in build-tools project.getTasks().withType(GroovyCompile.class).configureEach(compileTask -> { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java index d064c70c72819..e62c26c7fbc01 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java @@ -15,8 +15,10 @@ import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.internal.conventions.util.Util; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.gradle.api.Action; +import org.gradle.api.JavaVersion; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.Task; @@ -24,6 +26,7 @@ import org.gradle.api.plugins.BasePlugin; import org.gradle.api.plugins.JavaLibraryPlugin; import 
org.gradle.api.plugins.JavaPlugin; +import org.gradle.api.provider.Property; import org.gradle.api.tasks.TaskProvider; import org.gradle.api.tasks.bundling.Jar; import org.gradle.api.tasks.javadoc.Javadoc; @@ -34,6 +37,7 @@ import java.util.Map; import static org.elasticsearch.gradle.internal.conventions.util.Util.toStringable; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; /** * A wrapper around Gradle's Java plugin that applies our @@ -42,13 +46,15 @@ public class ElasticsearchJavaPlugin implements Plugin { @Override public void apply(Project project) { + project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); + Property buildParams = loadBuildParams(project); project.getPluginManager().apply(ElasticsearchJavaBasePlugin.class); project.getPluginManager().apply(JavaLibraryPlugin.class); project.getPluginManager().apply(ElasticsearchJavaModulePathPlugin.class); // configureConfigurations(project); - configureJars(project); - configureJarManifest(project); + configureJars(project, buildParams.get()); + configureJarManifest(project, buildParams.get()); configureJavadoc(project); testCompileOnlyDeps(project); } @@ -63,7 +69,9 @@ private static void testCompileOnlyDeps(Project project) { /** * Adds additional manifest info to jars */ - static void configureJars(Project project) { + static void configureJars(Project project, BuildParameterExtension buildParams) { + String buildDate = buildParams.getBuildDate().toString(); + JavaVersion gradleJavaVersion = buildParams.getGradleJavaVersion(); project.getTasks().withType(Jar.class).configureEach(jarTask -> { // we put all our distributable files under distributions jarTask.getDestinationDirectory().set(new File(project.getBuildDir(), "distributions")); @@ -75,14 +83,11 @@ static void configureJars(Project project) { public void execute(Task task) { // this doFirst is added before the info plugin, therefore it will run // after the doFirst added by the info plugin, and we can override attributes - jarTask.getManifest() - .attributes( - Map.of("Build-Date", BuildParams.getBuildDate(), "Build-Java-Version", BuildParams.getGradleJavaVersion()) - ); + jarTask.getManifest().attributes(Map.of("Build-Date", buildDate, "Build-Java-Version", gradleJavaVersion)); } }); }); - project.getPluginManager().withPlugin("com.github.johnrengelman.shadow", p -> { + project.getPluginManager().withPlugin("com.gradleup.shadow", p -> { project.getTasks().withType(ShadowJar.class).configureEach(shadowJar -> { /* * Replace the default "-all" classifier with null @@ -102,10 +107,13 @@ public void execute(Task task) { }); } - private static void configureJarManifest(Project project) { + private static void configureJarManifest(Project project, BuildParameterExtension buildParams) { + String gitOrigin = buildParams.getGitOrigin(); + String gitRevision = buildParams.getGitRevision(); + project.getPlugins().withType(InfoBrokerPlugin.class).whenPluginAdded(manifestPlugin -> { - manifestPlugin.add("Module-Origin", toStringable(BuildParams::getGitOrigin)); - manifestPlugin.add("Change", toStringable(BuildParams::getGitRevision)); + manifestPlugin.add("Module-Origin", toStringable(() -> gitOrigin)); + manifestPlugin.add("Change", toStringable(() -> gitRevision)); manifestPlugin.add("X-Compile-Elasticsearch-Version", toStringable(VersionProperties::getElasticsearch)); manifestPlugin.add("X-Compile-Lucene-Version", toStringable(VersionProperties::getLucene)); manifestPlugin.add( diff --git 
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavadocPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavadocPlugin.java index 42a44edd7f9a5..aca310cbf1e47 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavadocPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavadocPlugin.java @@ -82,12 +82,15 @@ private void configureJavadocForConfiguration(Project project, boolean shadow, C .sorted(Comparator.comparing(Dependency::getGroup)) .filter(d -> d instanceof ProjectDependency) .map(d -> (ProjectDependency) d) - .filter(p -> p.getDependencyProject() != null) .forEach(projectDependency -> configureDependency(project, shadow, projectDependency)); } private void configureDependency(Project project, boolean shadowed, ProjectDependency dep) { - var upstreamProject = dep.getDependencyProject(); + // we should use variant aware dependency management to resolve artifacts required for javadoc here + Project upstreamProject = project.project(dep.getPath()); + if (upstreamProject == null) { + return; + } if (shadowed) { /* * Include the source of shadowed upstream projects so we don't diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java index 7a831fbcc1464..4446952fec2bb 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java @@ -13,7 +13,7 @@ import org.elasticsearch.gradle.OS; import org.elasticsearch.gradle.internal.conventions.util.Util; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.internal.test.ErrorReportingTestListener; import org.elasticsearch.gradle.internal.test.SimpleCommandLineArgumentProvider; @@ -26,6 +26,7 @@ import org.gradle.api.artifacts.Configuration; import org.gradle.api.file.FileCollection; import org.gradle.api.plugins.JavaPlugin; +import org.gradle.api.provider.Property; import org.gradle.api.provider.ProviderFactory; import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.SourceSetContainer; @@ -37,6 +38,7 @@ import javax.inject.Inject; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; import static org.elasticsearch.gradle.util.FileUtils.mkdirs; import static org.elasticsearch.gradle.util.GradleUtils.maybeConfigure; @@ -52,6 +54,9 @@ public abstract class ElasticsearchTestBasePlugin implements Plugin { @Override public void apply(Project project) { + project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); + Property buildParams = loadBuildParams(project); + project.getPluginManager().apply(GradleTestPolicySetupPlugin.class); // for fips mode check project.getRootProject().getPluginManager().apply(GlobalBuildInfoPlugin.class); @@ -100,7 +105,7 @@ public void execute(Task t) { test.getExtensions().add("nonInputProperties", nonInputProperties); test.setWorkingDir(project.file(project.getBuildDir() + "/testrun/" + test.getName().replace("#", "_"))); - test.setMaxParallelForks(Integer.parseInt(System.getProperty("tests.jvms", 
BuildParams.getDefaultParallel().toString()))); + test.setMaxParallelForks(Integer.parseInt(System.getProperty("tests.jvms", buildParams.get().getDefaultParallel().toString()))); test.exclude("**/*$*.class"); @@ -146,9 +151,9 @@ public void execute(Task t) { // ignore changing test seed when build is passed -Dignore.tests.seed for cacheability experimentation if (System.getProperty("ignore.tests.seed") != null) { - nonInputProperties.systemProperty("tests.seed", BuildParams.getTestSeed()); + nonInputProperties.systemProperty("tests.seed", buildParams.get().getTestSeed()); } else { - test.systemProperty("tests.seed", BuildParams.getTestSeed()); + test.systemProperty("tests.seed", buildParams.get().getTestSeed()); } // don't track these as inputs since they contain absolute paths and break cache relocatability @@ -193,7 +198,7 @@ public void execute(Task t) { * If this project builds a shadow JAR than any unit tests should test against that artifact instead of * compiled class output and dependency jars. This better emulates the runtime environment of consumers. */ - project.getPluginManager().withPlugin("com.github.johnrengelman.shadow", p -> { + project.getPluginManager().withPlugin("com.gradleup.shadow", p -> { if (test.getName().equals(JavaPlugin.TEST_TASK_NAME)) { // Remove output class files and any other dependencies from the test classpath, since the shadow JAR includes these SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index fcf286ed471dd..c17127f9bbfcf 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -10,7 +10,7 @@ package org.elasticsearch.gradle.internal; import org.elasticsearch.gradle.Version; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.gradle.api.Action; import org.gradle.api.InvalidUserDataException; @@ -23,6 +23,7 @@ import org.gradle.api.provider.Provider; import org.gradle.api.provider.ProviderFactory; import org.gradle.api.tasks.Copy; +import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.TaskProvider; import org.gradle.jvm.toolchain.JavaToolchainService; import org.gradle.language.base.plugins.LifecycleBasePlugin; @@ -39,6 +40,7 @@ import static java.util.Arrays.asList; import static java.util.Arrays.stream; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; /** * We want to be able to do BWC tests for unreleased versions without relying on and waiting for snapshots. 
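Across these plugin changes the static BuildParams lookups give way to a BuildParameterExtension that GlobalBuildInfoPlugin registers on the root project and that call sites fetch through ParamsUtils.loadBuildParams(project). The helper's body is not part of this diff; the following is a minimal sketch of what such a lookup can do, wrapping the extension in a lazy Property (the Property-wrapping detail is an assumption, not taken from the diff):

import org.elasticsearch.gradle.internal.info.BuildParameterExtension;
import org.gradle.api.Project;
import org.gradle.api.provider.Property;

public class ParamsUtils {

    // Sketch only: the real ParamsUtils.loadBuildParams may differ. It assumes
    // GlobalBuildInfoPlugin has already registered BuildParameterExtension on
    // the root project, which is why callers apply that plugin first.
    public static Property<BuildParameterExtension> loadBuildParams(Project project) {
        BuildParameterExtension buildParams = project.getRootProject()
            .getExtensions()
            .getByType(BuildParameterExtension.class);
        Property<BuildParameterExtension> property = project.getObjects()
            .property(BuildParameterExtension.class);
        property.set(buildParams);
        return property;
    }
}

Holding the extension behind a Property lets tasks read values such as getDefaultParallel() or getTestSeed() at execution time without capturing project state, which is what these configuration-cache-oriented edits are after.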
@@ -64,23 +66,29 @@ public void apply(Project project) { project.getRootProject().getPluginManager().apply(GlobalBuildInfoPlugin.class); project.getPlugins().apply(JvmToolchainsPlugin.class); toolChainService = project.getExtensions().getByType(JavaToolchainService.class); - BuildParams.getBwcVersions().forPreviousUnreleased((BwcVersions.UnreleasedVersionInfo unreleasedVersion) -> { + BuildParameterExtension buildParams = loadBuildParams(project).get(); + Boolean isCi = buildParams.isCi(); + buildParams.getBwcVersions().forPreviousUnreleased((BwcVersions.UnreleasedVersionInfo unreleasedVersion) -> { configureBwcProject( project.project(unreleasedVersion.gradleProjectPath()), + buildParams, unreleasedVersion, providerFactory, objectFactory, - toolChainService + toolChainService, + isCi ); }); } private static void configureBwcProject( Project project, + BuildParameterExtension buildParams, BwcVersions.UnreleasedVersionInfo versionInfo, ProviderFactory providerFactory, ObjectFactory objectFactory, - JavaToolchainService toolChainService + JavaToolchainService toolChainService, + Boolean isCi ) { ProjectLayout layout = project.getLayout(); Provider versionInfoProvider = providerFactory.provider(() -> versionInfo); @@ -96,7 +104,8 @@ private static void configureBwcProject( providerFactory, toolChainService, versionInfoProvider, - checkoutDir + checkoutDir, + isCi ); BwcGitExtension gitExtension = project.getPlugins().apply(InternalBwcGitPlugin.class).getGitExtension(); Provider bwcVersion = versionInfoProvider.map(info -> info.version()); @@ -122,6 +131,7 @@ private static void configureBwcProject( for (DistributionProject distributionProject : distributionProjects) { createBuildBwcTask( bwcSetupExtension, + buildParams, project, bwcVersion, distributionProject.name, @@ -144,6 +154,7 @@ private static void configureBwcProject( createBuildBwcTask( bwcSetupExtension, + buildParams, project, bwcVersion, "jdbc", @@ -177,6 +188,7 @@ private static void configureBwcProject( createBuildBwcTask( bwcSetupExtension, + buildParams, project, bwcVersion, stableApiProject.getName(), @@ -296,6 +308,7 @@ public static String buildBwcTaskName(String projectName) { static void createBuildBwcTask( BwcSetupExtension bwcSetupExtension, + BuildParameterExtension buildParams, Project project, Provider bwcVersion, String projectName, @@ -310,13 +323,13 @@ static void createBuildBwcTask( File expectedOutputFile = useNativeExpanded ? 
new File(projectArtifact.expandedDistDir, "elasticsearch-" + bwcVersion.get() + "-SNAPSHOT") : projectArtifact.distFile; - c.getInputs().file(new File(project.getBuildDir(), "refspec")); + c.getInputs().file(new File(project.getBuildDir(), "refspec")).withPathSensitivity(PathSensitivity.RELATIVE); if (useNativeExpanded) { c.getOutputs().dir(expectedOutputFile); } else { c.getOutputs().files(expectedOutputFile); } - c.getOutputs().doNotCacheIf("BWC distribution caching is disabled for local builds", task -> BuildParams.isCi() == false); + c.getOutputs().doNotCacheIf("BWC distribution caching is disabled for local builds", task -> buildParams.isCi() == false); c.getArgs().add("-p"); c.getArgs().add(projectPath); c.getArgs().add(assembleTaskName); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java index 0bf4bcb33c23b..ec694de8ec597 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java @@ -20,7 +20,7 @@ import org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes; import org.elasticsearch.gradle.internal.docker.DockerSupportPlugin; import org.elasticsearch.gradle.internal.docker.DockerSupportService; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.GradleException; @@ -35,6 +35,8 @@ import java.util.Map; import java.util.function.Function; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; + /** * An internal elasticsearch build plugin that registers additional * distribution resolution strategies to the 'elasticsearch.download-distribution' plugin @@ -47,6 +49,8 @@ public void apply(Project project) { // this is needed for isInternal project.getRootProject().getPluginManager().apply(GlobalBuildInfoPlugin.class); project.getRootProject().getPluginManager().apply(DockerSupportPlugin.class); + BuildParameterExtension buildParams = loadBuildParams(project).get(); + DistributionDownloadPlugin distributionDownloadPlugin = project.getPlugins().apply(DistributionDownloadPlugin.class); Provider dockerSupport = GradleUtils.getBuildService( project.getGradle().getSharedServices(), @@ -55,7 +59,10 @@ public void apply(Project project) { distributionDownloadPlugin.setDockerAvailability( dockerSupport.map(dockerSupportService -> dockerSupportService.getDockerAvailability().isAvailable()) ); - registerInternalDistributionResolutions(DistributionDownloadPlugin.getRegistrationsContainer(project)); + registerInternalDistributionResolutions( + DistributionDownloadPlugin.getRegistrationsContainer(project), + buildParams.getBwcVersionsProperty() + ); } /** @@ -66,7 +73,7 @@ public void apply(Project project) { *

* BWC versions are resolved as project to projects under `:distribution:bwc`. */ - private void registerInternalDistributionResolutions(List resolutions) { + private void registerInternalDistributionResolutions(List resolutions, Provider bwcVersions) { resolutions.add(new DistributionResolution("local-build", (project, distribution) -> { if (isCurrentVersion(distribution)) { // non-external project, so depend on local build @@ -78,7 +85,7 @@ private void registerInternalDistributionResolutions(List { - BwcVersions.UnreleasedVersionInfo unreleasedInfo = BuildParams.getBwcVersions() + BwcVersions.UnreleasedVersionInfo unreleasedInfo = bwcVersions.get() .unreleasedInfo(Version.fromString(distribution.getVersion())); if (unreleasedInfo != null) { if (distribution.getBundledJdk() == false) { @@ -166,9 +173,6 @@ private static String distributionProjectName(ElasticsearchDistribution distribu if (distribution.getType() == InternalElasticsearchDistributionTypes.DOCKER) { return projectName + "docker" + archString + "-export"; } - if (distribution.getType() == InternalElasticsearchDistributionTypes.DOCKER_UBI) { - return projectName + "ubi-docker" + archString + "-export"; - } if (distribution.getType() == InternalElasticsearchDistributionTypes.DOCKER_IRONBANK) { return projectName + "ironbank-docker" + archString + "-export"; } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestClustersPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestClustersPlugin.java index c7ab83ff7829a..7e7ffad12a9a5 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestClustersPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestClustersPlugin.java @@ -10,34 +10,29 @@ package org.elasticsearch.gradle.internal; import org.elasticsearch.gradle.VersionProperties; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.testclusters.ElasticsearchCluster; import org.elasticsearch.gradle.testclusters.TestClustersPlugin; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; -import org.gradle.api.provider.ProviderFactory; -import javax.inject.Inject; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; public class InternalTestClustersPlugin implements Plugin { - private ProviderFactory providerFactory; - - @Inject - public InternalTestClustersPlugin(ProviderFactory providerFactory) { - this.providerFactory = providerFactory; - } - @Override public void apply(Project project) { project.getPlugins().apply(InternalDistributionDownloadPlugin.class); + project.getRootProject().getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); + BuildParameterExtension buildParams = loadBuildParams(project).get(); project.getRootProject().getPluginManager().apply(InternalReaperPlugin.class); TestClustersPlugin testClustersPlugin = project.getPlugins().apply(TestClustersPlugin.class); - testClustersPlugin.setRuntimeJava(providerFactory.provider(() -> BuildParams.getRuntimeJavaHome())); + testClustersPlugin.setRuntimeJava(buildParams.getRuntimeJavaHome()); testClustersPlugin.setIsReleasedVersion( - version -> (version.equals(VersionProperties.getElasticsearchVersion()) && BuildParams.isSnapshotBuild() == 
false) - || BuildParams.getBwcVersions().unreleasedInfo(version) == null + version -> (version.equals(VersionProperties.getElasticsearchVersion()) && buildParams.isSnapshotBuild() == false) + || buildParams.getBwcVersions().unreleasedInfo(version) == null ); if (shouldConfigureTestClustersWithOneProcessor()) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/Jdk.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/Jdk.java index c70d421939d6d..4396a18c205c1 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/Jdk.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/Jdk.java @@ -11,6 +11,7 @@ import org.gradle.api.Buildable; import org.gradle.api.artifacts.Configuration; +import org.gradle.api.file.FileCollection; import org.gradle.api.model.ObjectFactory; import org.gradle.api.provider.Property; import org.gradle.api.tasks.TaskDependency; @@ -33,13 +34,14 @@ public class Jdk implements Buildable, Iterable { private static final Pattern LEGACY_VERSION_PATTERN = Pattern.compile("(\\d)(u\\d+)\\+(b\\d+?)(@([a-f0-9]{32}))?"); private final String name; - private final Configuration configuration; + private final FileCollection configuration; private final Property vendor; private final Property version; private final Property platform; private final Property architecture; private final Property distributionVersion; + private final String configurationName; private String baseVersion; private String major; private String build; @@ -47,6 +49,7 @@ public class Jdk implements Buildable, Iterable { Jdk(String name, Configuration configuration, ObjectFactory objectFactory) { this.name = name; + this.configurationName = configuration.getName(); this.configuration = configuration; this.vendor = objectFactory.property(String.class); this.version = objectFactory.property(String.class); @@ -137,7 +140,7 @@ public String getPath() { } public String getConfigurationName() { - return configuration.getName(); + return configurationName; } @Override diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JdkDownloadPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JdkDownloadPlugin.java index 5b195cad3388f..3c278128e43f2 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JdkDownloadPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JdkDownloadPlugin.java @@ -21,8 +21,6 @@ import org.gradle.api.artifacts.type.ArtifactTypeDefinition; import org.gradle.api.attributes.Attribute; -import java.util.Arrays; - /** * @deprecated We wanna get rid from this and custom jdk downloads via this plugin and * make leverage the gradle toolchain resolver capabilities. 
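The Jdk change above shows the same serialization discipline in miniature: the field type is widened from Configuration to FileCollection, and the configuration's name is copied into a plain String at construction time, so the object no longer drags a live Configuration into serialized task state. A condensed sketch of the pattern (the JdkHandle name is invented for illustration; it is not the full Jdk implementation):

import org.gradle.api.artifacts.Configuration;
import org.gradle.api.file.FileCollection;

class JdkHandle {
    private final FileCollection files;      // widened type; a Configuration is-a FileCollection
    private final String configurationName;  // value captured eagerly, live reference dropped

    JdkHandle(Configuration configuration) {
        this.files = configuration;
        this.configurationName = configuration.getName();
    }

    // Answered from the captured String; no Configuration is needed at this point
    String getConfigurationName() {
        return configurationName;
    }
}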
@@ -38,8 +36,8 @@ public class JdkDownloadPlugin implements Plugin { private static final String REPO_NAME_PREFIX = "jdk_repo_"; private static final String EXTENSION_NAME = "jdks"; - public static final String JDK_TRIMMED_PREFIX = "(jdk-?\\d.*)|(zulu-?\\d.+).jdk"; - public static final String ZULU_LINUX_AARCH_PATTERN = "zulu.*linux_aarch64"; + public static final String JDK_TRIMMED_PREFIX = "(jdk-?\\d.*)|(zulu-?\\d.*).jdk"; + public static final String ZULU_LINUX_AARCH_PATTERN = "zulu.*_aarch64"; @Override public void apply(Project project) { @@ -66,7 +64,8 @@ public void apply(Project project) { .attribute(jdkAttribute, true); transformSpec.parameters(parameters -> { parameters.setTrimmedPrefixPattern(JDK_TRIMMED_PREFIX); - parameters.setKeepStructureFor(Arrays.asList(ZULU_LINUX_AARCH_PATTERN)); + // parameters.setAsFiletreeOutput(true); + // parameters.setKeepStructureFor(Arrays.asList(ZULU_LINUX_AARCH_PATTERN)); }); }); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java index f988208ab4fec..7c488e6e73fee 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java @@ -9,7 +9,8 @@ package org.elasticsearch.gradle.internal; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.JavaVersion; @@ -20,6 +21,7 @@ import org.gradle.api.plugins.JavaPluginExtension; import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.SourceSetContainer; +import org.gradle.api.tasks.TaskProvider; import org.gradle.api.tasks.compile.CompileOptions; import org.gradle.api.tasks.compile.JavaCompile; import org.gradle.api.tasks.javadoc.Javadoc; @@ -47,6 +49,7 @@ import javax.inject.Inject; import static de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin.FORBIDDEN_APIS_TASK_NAME; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; import static org.objectweb.asm.Opcodes.V_PREVIEW; public class MrjarPlugin implements Plugin { @@ -64,6 +67,8 @@ public class MrjarPlugin implements Plugin { @Override public void apply(Project project) { project.getPluginManager().apply(ElasticsearchJavaBasePlugin.class); + project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); + var buildParams = loadBuildParams(project).get(); var javaExtension = project.getExtensions().getByType(JavaPluginExtension.class); var isIdeaSync = System.getProperty("idea.sync.active", "false").equals("true"); var ideaSourceSetsEnabled = project.hasProperty(MRJAR_IDEA_ENABLED) && project.property(MRJAR_IDEA_ENABLED).equals("true"); @@ -83,13 +88,14 @@ public void apply(Project project) { String mainSourceSetName = SourceSet.MAIN_SOURCE_SET_NAME + javaVersion; SourceSet mainSourceSet = addSourceSet(project, javaExtension, mainSourceSetName, mainSourceSets, javaVersion); configureSourceSetInJar(project, mainSourceSet, javaVersion); + addJar(project, mainSourceSet, javaVersion); mainSourceSets.add(mainSourceSetName); testSourceSets.add(mainSourceSetName); String testSourceSetName = SourceSet.TEST_SOURCE_SET_NAME + javaVersion; SourceSet testSourceSet 
= addSourceSet(project, javaExtension, testSourceSetName, testSourceSets, javaVersion); testSourceSets.add(testSourceSetName); - createTestTask(project, testSourceSet, javaVersion, mainSourceSets); + createTestTask(project, buildParams, testSourceSet, javaVersion, mainSourceSets); } } @@ -143,6 +149,14 @@ private SourceSet addSourceSet( return sourceSet; } + private void addJar(Project project, SourceSet sourceSet, int javaVersion) { + project.getConfigurations().register("java" + javaVersion); + TaskProvider jarTask = project.getTasks().register("java" + javaVersion + "Jar", Jar.class, task -> { + task.from(sourceSet.getOutput()); + }); + project.getArtifacts().add("java" + javaVersion, jarTask); + } + private void configurePreviewFeatures(Project project, SourceSet sourceSet, int javaVersion) { project.getTasks().withType(JavaCompile.class).named(sourceSet.getCompileJavaTaskName()).configure(compileTask -> { CompileOptions compileOptions = compileTask.getOptions(); @@ -163,7 +177,13 @@ private void configureSourceSetInJar(Project project, SourceSet sourceSet, int j jarTask.configure(task -> task.into("META-INF/versions/" + javaVersion, copySpec -> copySpec.from(sourceSet.getOutput()))); } - private void createTestTask(Project project, SourceSet sourceSet, int javaVersion, List mainSourceSets) { + private void createTestTask( + Project project, + BuildParameterExtension buildParams, + SourceSet sourceSet, + int javaVersion, + List mainSourceSets + ) { var jarTask = project.getTasks().withType(Jar.class).named(JavaPlugin.JAR_TASK_NAME); var testTaskProvider = project.getTasks().register(JavaPlugin.TEST_TASK_NAME + javaVersion, Test.class); testTaskProvider.configure(testTask -> { @@ -180,9 +200,9 @@ private void createTestTask(Project project, SourceSet sourceSet, int javaVersio // only set the jdk if runtime java isn't set because setting the toolchain is incompatible with // runtime java setting the executable directly - if (BuildParams.getIsRuntimeJavaHomeSet()) { + if (buildParams.getIsRuntimeJavaHomeSet()) { testTask.onlyIf("runtime java must support java " + javaVersion, t -> { - JavaVersion runtimeJavaVersion = BuildParams.getRuntimeJavaVersion(); + JavaVersion runtimeJavaVersion = buildParams.getRuntimeJavaVersion().get(); return runtimeJavaVersion.isCompatibleWith(JavaVersion.toVersion(javaVersion)); }); } else { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java index 8f0951da86b88..28776f03d17e8 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java @@ -17,7 +17,6 @@ public class InternalElasticsearchDistributionTypes { public static ElasticsearchDistributionType DEB = new DebElasticsearchDistributionType(); public static ElasticsearchDistributionType RPM = new RpmElasticsearchDistributionType(); public static ElasticsearchDistributionType DOCKER = new DockerElasticsearchDistributionType(); - public static ElasticsearchDistributionType DOCKER_UBI = new DockerUbiElasticsearchDistributionType(); public static ElasticsearchDistributionType DOCKER_IRONBANK = new DockerIronBankElasticsearchDistributionType(); public static ElasticsearchDistributionType 
DOCKER_CLOUD_ESS = new DockerCloudEssElasticsearchDistributionType(); public static ElasticsearchDistributionType DOCKER_WOLFI = new DockerWolfiElasticsearchDistributionType(); @@ -26,7 +25,6 @@ public class InternalElasticsearchDistributionTypes { DEB, RPM, DOCKER, - DOCKER_UBI, DOCKER_IRONBANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportPlugin.java index 84a6432041ed1..7348181c4199c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportPlugin.java @@ -8,6 +8,7 @@ */ package org.elasticsearch.gradle.internal.docker; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.Task; @@ -17,6 +18,8 @@ import java.util.List; import java.util.stream.Collectors; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; + /** * Plugin providing {@link DockerSupportService} for detecting Docker installations and determining requirements for Docker-based * Elasticsearch build tasks. @@ -30,11 +33,14 @@ public void apply(Project project) { if (project != project.getRootProject()) { throw new IllegalStateException(this.getClass().getName() + " can only be applied to the root project."); } + project.getPlugins().apply(GlobalBuildInfoPlugin.class); + var buildParams = loadBuildParams(project).get(); Provider dockerSupportServiceProvider = project.getGradle() .getSharedServices() .registerIfAbsent(DOCKER_SUPPORT_SERVICE_NAME, DockerSupportService.class, spec -> spec.parameters(params -> { params.setExclusionsFile(new File(project.getRootDir(), DOCKER_ON_LINUX_EXCLUSIONS_FILE)); + params.getIsCI().set(buildParams.isCi()); })); // Ensure that if we are trying to run any DockerBuildTask tasks, we assert an available Docker installation exists diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java index 344a477e74ef9..f40f5d932b701 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java @@ -13,10 +13,10 @@ import org.elasticsearch.gradle.Architecture; import org.elasticsearch.gradle.OS; import org.elasticsearch.gradle.Version; -import org.elasticsearch.gradle.internal.info.BuildParams; import org.gradle.api.GradleException; import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; +import org.gradle.api.provider.Property; import org.gradle.api.provider.ProviderFactory; import org.gradle.api.services.BuildService; import org.gradle.api.services.BuildServiceParameters; @@ -59,7 +59,6 @@ public abstract class DockerSupportService implements BuildService serviceInfos; private Map> tcpPorts; private Map> udpPorts; @@ -228,7 +227,7 @@ private boolean isExcludedOs() { // We don't attempt to check the current flavor and version of Linux unless we're // running in CI, because we don't want to stop people running the Docker tests in // their own environments if they really want to. 
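The hunk below replaces the static BuildParams.isCi() check with a CI flag supplied through the build service's parameters, so the value is captured once at registration time instead of being read from global state. A minimal sketch of that build-service parameter pattern, with an illustrative service name (not the actual DockerSupportService):

    import org.gradle.api.provider.Property;
    import org.gradle.api.services.BuildService;
    import org.gradle.api.services.BuildServiceParameters;

    // Illustrative service: configuration is passed in as parameters rather than
    // read from mutable static state such as the old BuildParams class.
    public abstract class CiAwareService implements BuildService<CiAwareService.Params> {
        public interface Params extends BuildServiceParameters {
            Property<Boolean> getIsCI();
        }

        public boolean shouldRunStrictChecks() {
            // Reads the value wired in when the service was registered.
            return getParameters().getIsCI().get();
        }
    }

Registration then supplies the flag once, for example: project.getGradle().getSharedServices().registerIfAbsent("ciAware", CiAwareService.class, spec -> spec.getParameters().getIsCI().set(isCi)); this also plays well with the configuration cache because no static state is consulted at execution time.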
- if (BuildParams.isCi() == false) { + if (getParameters().getIsCI().get().booleanValue() == false) { return false; } @@ -356,10 +355,6 @@ public Map> getUdpPorts() { return udpPorts; } - public void setServiceInfos(Map serviceInfos) { - this.serviceInfos = serviceInfos; - } - /** * An immutable class that represents the results of a Docker search from {@link #getDockerAvailability()}}. */ @@ -402,5 +397,7 @@ interface Parameters extends BuildServiceParameters { File getExclusionsFile(); void setExclusionsFile(File exclusionsFile); + + Property getIsCI(); } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParameterExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParameterExtension.java new file mode 100644 index 0000000000000..5531194e0abde --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParameterExtension.java @@ -0,0 +1,187 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.gradle.internal.info; + +import org.elasticsearch.gradle.internal.BwcVersions; +import org.gradle.api.Action; +import org.gradle.api.JavaVersion; +import org.gradle.api.Task; +import org.gradle.api.provider.Property; +import org.gradle.api.provider.Provider; +import org.gradle.api.provider.ProviderFactory; +import org.gradle.jvm.toolchain.JavaToolchainSpec; + +import java.io.File; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.util.List; +import java.util.Random; +import java.util.concurrent.atomic.AtomicReference; + +public abstract class BuildParameterExtension { + private final Provider inFipsJvm; + private final Provider runtimeJavaHome; + private final Boolean isRuntimeJavaHomeSet; + private final List javaVersions; + private final JavaVersion minimumCompilerVersion; + private final JavaVersion minimumRuntimeVersion; + private final JavaVersion gradleJavaVersion; + private final Provider runtimeJavaVersion; + private final Provider> javaToolChainSpec; + private final Provider runtimeJavaDetails; + private final String gitRevision; + private transient AtomicReference buildDate = new AtomicReference<>(); + private final String testSeed; + private final Boolean isCi; + private final Integer defaultParallel; + private final Boolean isSnapshotBuild; + + public BuildParameterExtension( + ProviderFactory providers, + Provider runtimeJavaHome, + Provider> javaToolChainSpec, + Provider runtimeJavaVersion, + boolean isRuntimeJavaHomeSet, + Provider runtimeJavaDetails, + List javaVersions, + JavaVersion minimumCompilerVersion, + JavaVersion minimumRuntimeVersion, + JavaVersion gradleJavaVersion, + String gitRevision, + String gitOrigin, + ZonedDateTime buildDate, + String testSeed, + boolean isCi, + int defaultParallel, + final boolean isSnapshotBuild, + Provider bwcVersions + ) { + this.inFipsJvm = providers.systemProperty("tests.fips.enabled").map(BuildParameterExtension::parseBoolean); + this.runtimeJavaHome = runtimeJavaHome; + this.javaToolChainSpec = javaToolChainSpec; + this.runtimeJavaVersion = 
runtimeJavaVersion; + this.isRuntimeJavaHomeSet = isRuntimeJavaHomeSet; + this.runtimeJavaDetails = runtimeJavaDetails; + this.javaVersions = javaVersions; + this.minimumCompilerVersion = minimumCompilerVersion; + this.minimumRuntimeVersion = minimumRuntimeVersion; + this.gradleJavaVersion = gradleJavaVersion; + this.gitRevision = gitRevision; + this.testSeed = testSeed; + this.isCi = isCi; + this.defaultParallel = defaultParallel; + this.isSnapshotBuild = isSnapshotBuild; + this.getBwcVersionsProperty().set(bwcVersions); + this.getGitOriginProperty().set(gitOrigin); + } + + private static boolean parseBoolean(String s) { + if (s == null) { + return false; + } + return Boolean.parseBoolean(s); + } + + public boolean getInFipsJvm() { + return inFipsJvm.getOrElse(false); + } + + public Provider getRuntimeJavaHome() { + return runtimeJavaHome; + } + + public void withFipsEnabledOnly(Task task) { + task.onlyIf("FIPS mode disabled", task1 -> getInFipsJvm() == false); + } + + public Boolean getIsRuntimeJavaHomeSet() { + return isRuntimeJavaHomeSet; + } + + public List getJavaVersions() { + return javaVersions; + } + + public JavaVersion getMinimumCompilerVersion() { + return minimumCompilerVersion; + } + + public JavaVersion getMinimumRuntimeVersion() { + return minimumRuntimeVersion; + } + + public JavaVersion getGradleJavaVersion() { + return gradleJavaVersion; + } + + public Provider getRuntimeJavaVersion() { + return runtimeJavaVersion; + } + + public Provider> getJavaToolChainSpec() { + return javaToolChainSpec; + } + + public Provider getRuntimeJavaDetails() { + return runtimeJavaDetails; + } + + public String getGitRevision() { + return gitRevision; + } + + public String getGitOrigin() { + return getGitOriginProperty().get(); + } + + public ZonedDateTime getBuildDate() { + ZonedDateTime value = buildDate.get(); + if (value == null) { + value = ZonedDateTime.now(ZoneOffset.UTC); + if (buildDate.compareAndSet(null, value) == false) { + // If another thread initialized it first, return the initialized value + value = buildDate.get(); + } + } + return value; + } + + public String getTestSeed() { + return testSeed; + } + + public Boolean isCi() { + return isCi; + } + + public Integer getDefaultParallel() { + return defaultParallel; + } + + public Boolean isSnapshotBuild() { + return isSnapshotBuild; + } + + public BwcVersions getBwcVersions() { + return getBwcVersionsProperty().get(); + } + + public abstract Property getBwcVersionsProperty(); + + public abstract Property getGitOriginProperty(); + + public Random getRandom() { + return new Random(Long.parseUnsignedLong(testSeed.split(":")[0], 16)); + } + + public Boolean isGraalVmRuntime() { + return runtimeJavaDetails.get().toLowerCase().contains("graalvm"); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParameterService.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParameterService.java new file mode 100644 index 0000000000000..ec1bc4aec1324 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParameterService.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.gradle.internal.info; + +import org.gradle.api.provider.Property; +import org.gradle.api.services.BuildService; +import org.gradle.api.services.BuildServiceParameters; + +public abstract class BuildParameterService implements BuildService, AutoCloseable { + public interface Params extends BuildServiceParameters { + Property getBuildParams(); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java deleted file mode 100644 index d3afeed9f8578..0000000000000 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ -package org.elasticsearch.gradle.internal.info; - -import org.elasticsearch.gradle.internal.BwcVersions; -import org.gradle.api.Action; -import org.gradle.api.JavaVersion; -import org.gradle.api.Task; -import org.gradle.api.provider.Provider; -import org.gradle.jvm.toolchain.JavaToolchainSpec; - -import java.io.File; -import java.io.IOException; -import java.lang.reflect.Modifier; -import java.time.ZonedDateTime; -import java.util.Arrays; -import java.util.List; -import java.util.Random; -import java.util.function.Consumer; - -import static java.util.Objects.requireNonNull; - -public class BuildParams { - private static Provider runtimeJavaHome; - private static Boolean isRuntimeJavaHomeSet; - private static List javaVersions; - private static JavaVersion minimumCompilerVersion; - private static JavaVersion minimumRuntimeVersion; - private static JavaVersion gradleJavaVersion; - private static Provider runtimeJavaVersion; - private static Provider> javaToolChainSpec; - private static Provider runtimeJavaDetails; - private static Boolean inFipsJvm; - private static String gitRevision; - private static String gitOrigin; - private static ZonedDateTime buildDate; - private static String testSeed; - private static Boolean isCi; - private static Integer defaultParallel; - private static Boolean isSnapshotBuild; - private static Provider bwcVersions; - - /** - * Initialize global build parameters. This method accepts and a initialization function which in turn accepts a - * {@link MutableBuildParams}. Initialization can be done in "stages", therefore changes override existing values, and values from - * previous calls to {@link #init(Consumer)} carry forward. In cases where you want to clear existing values - * {@link MutableBuildParams#reset()} may be used. 
- * - * @param initializer Build parameter initializer - */ - public static void init(Consumer initializer) { - initializer.accept(MutableBuildParams.INSTANCE); - } - - public static File getRuntimeJavaHome() { - return value(runtimeJavaHome).get(); - } - - public static Boolean getIsRuntimeJavaHomeSet() { - return value(isRuntimeJavaHomeSet); - } - - public static List getJavaVersions() { - return value(javaVersions); - } - - public static JavaVersion getMinimumCompilerVersion() { - return value(minimumCompilerVersion); - } - - public static JavaVersion getMinimumRuntimeVersion() { - return value(minimumRuntimeVersion); - } - - public static JavaVersion getGradleJavaVersion() { - return value(gradleJavaVersion); - } - - public static JavaVersion getRuntimeJavaVersion() { - return value(runtimeJavaVersion.get()); - } - - public static String getRuntimeJavaDetails() { - return value(runtimeJavaDetails.get()); - } - - public static Boolean isInFipsJvm() { - return value(inFipsJvm); - } - - public static void withFipsEnabledOnly(Task task) { - task.onlyIf("FIPS mode disabled", task1 -> isInFipsJvm() == false); - } - - public static String getGitRevision() { - return value(gitRevision); - } - - public static String getGitOrigin() { - return value(gitOrigin); - } - - public static ZonedDateTime getBuildDate() { - return value(buildDate); - } - - public static BwcVersions getBwcVersions() { - return value(bwcVersions).get(); - } - - public static String getTestSeed() { - return value(testSeed); - } - - public static Random getRandom() { - return new Random(Long.parseUnsignedLong(testSeed.split(":")[0], 16)); - } - - public static Boolean isCi() { - return value(isCi); - } - - public static Boolean isGraalVmRuntime() { - return value(runtimeJavaDetails.get().toLowerCase().contains("graalvm")); - } - - public static Integer getDefaultParallel() { - return value(defaultParallel); - } - - public static boolean isSnapshotBuild() { - return value(BuildParams.isSnapshotBuild); - } - - public static Provider> getJavaToolChainSpec() { - return javaToolChainSpec; - } - - private static T value(T object) { - if (object == null) { - String callingMethod = Thread.currentThread().getStackTrace()[2].getMethodName(); - - throw new IllegalStateException( - "Build parameter '" - + propertyName(callingMethod) - + "' has not been initialized.\n" - + "Perhaps the plugin responsible for initializing this property has not been applied." - ); - } - - return object; - } - - private static String propertyName(String methodName) { - String propertyName = methodName.startsWith("is") ? methodName.substring("is".length()) : methodName.substring("get".length()); - return propertyName.substring(0, 1).toLowerCase() + propertyName.substring(1); - } - - public static class MutableBuildParams { - private static MutableBuildParams INSTANCE = new MutableBuildParams(); - - private MutableBuildParams() {} - - /** - * Resets any existing values from previous initializations. - */ - public void reset() { - Arrays.stream(BuildParams.class.getDeclaredFields()).filter(f -> Modifier.isStatic(f.getModifiers())).forEach(f -> { - try { - // Since we are mutating private static fields from a public static inner class we need to suppress - // accessibility controls here. 
- f.setAccessible(true); - f.set(null, null); - } catch (IllegalAccessException e) { - throw new RuntimeException(e); - } - }); - } - - public void setRuntimeJavaHome(Provider runtimeJavaHome) { - BuildParams.runtimeJavaHome = runtimeJavaHome.map(javaHome -> { - try { - return javaHome.getCanonicalFile(); - } catch (IOException e) { - throw new RuntimeException(e); - } - }); - } - - public void setIsRuntimeJavaHomeSet(boolean isRuntimeJavaHomeSet) { - BuildParams.isRuntimeJavaHomeSet = isRuntimeJavaHomeSet; - } - - public void setJavaVersions(List javaVersions) { - BuildParams.javaVersions = requireNonNull(javaVersions); - } - - public void setMinimumCompilerVersion(JavaVersion minimumCompilerVersion) { - BuildParams.minimumCompilerVersion = requireNonNull(minimumCompilerVersion); - } - - public void setMinimumRuntimeVersion(JavaVersion minimumRuntimeVersion) { - BuildParams.minimumRuntimeVersion = requireNonNull(minimumRuntimeVersion); - } - - public void setGradleJavaVersion(JavaVersion gradleJavaVersion) { - BuildParams.gradleJavaVersion = requireNonNull(gradleJavaVersion); - } - - public void setRuntimeJavaVersion(Provider runtimeJavaVersion) { - BuildParams.runtimeJavaVersion = requireNonNull(runtimeJavaVersion); - } - - public void setRuntimeJavaDetails(Provider runtimeJavaDetails) { - BuildParams.runtimeJavaDetails = runtimeJavaDetails; - } - - public void setInFipsJvm(boolean inFipsJvm) { - BuildParams.inFipsJvm = inFipsJvm; - } - - public void setGitRevision(String gitRevision) { - BuildParams.gitRevision = requireNonNull(gitRevision); - } - - public void setGitOrigin(String gitOrigin) { - BuildParams.gitOrigin = requireNonNull(gitOrigin); - } - - public void setBuildDate(ZonedDateTime buildDate) { - BuildParams.buildDate = requireNonNull(buildDate); - } - - public void setTestSeed(String testSeed) { - BuildParams.testSeed = requireNonNull(testSeed); - } - - public void setIsCi(boolean isCi) { - BuildParams.isCi = isCi; - } - - public void setDefaultParallel(int defaultParallel) { - BuildParams.defaultParallel = defaultParallel; - } - - public void setIsSnapshotBuild(final boolean isSnapshotBuild) { - BuildParams.isSnapshotBuild = isSnapshotBuild; - } - - public void setBwcVersions(Provider bwcVersions) { - BuildParams.bwcVersions = requireNonNull(bwcVersions); - } - - public void setJavaToolChainSpec(Provider> javaToolChain) { - BuildParams.javaToolChainSpec = javaToolChain; - } - } -} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java index 2d5a28bdd6af9..0535026b2594e 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java @@ -58,6 +58,8 @@ import javax.inject.Inject; +import static org.elasticsearch.gradle.internal.conventions.GUtils.elvis; + public class GlobalBuildInfoPlugin implements Plugin { private static final Logger LOGGER = Logging.getLogger(GlobalBuildInfoPlugin.class); private static final String DEFAULT_VERSION_JAVA_FILE_PATH = "server/src/main/java/org/elasticsearch/Version.java"; @@ -67,6 +69,7 @@ public class GlobalBuildInfoPlugin implements Plugin { private final JvmMetadataDetector metadataDetector; private final ProviderFactory providers; private JavaToolchainService toolChainService; + private Project project; @Inject public 
GlobalBuildInfoPlugin( @@ -87,6 +90,7 @@ public void apply(Project project) { if (project != project.getRootProject()) { throw new IllegalStateException(this.getClass().getName() + " can only be applied to the root project."); } + this.project = project; project.getPlugins().apply(JvmToolchainsPlugin.class); toolChainService = project.getExtensions().getByType(JavaToolchainService.class); GradleVersion minimumGradleVersion = GradleVersion.version(getResourceContents("/minimumGradleVersion")); @@ -98,55 +102,54 @@ public void apply(Project project) { JavaVersion minimumRuntimeVersion = JavaVersion.toVersion(getResourceContents("/minimumRuntimeVersion")); Provider explicitRuntimeJavaHome = findRuntimeJavaHome(); - boolean isExplicitRuntimeJavaHomeSet = explicitRuntimeJavaHome.isPresent(); - Provider actualRuntimeJavaHome = isExplicitRuntimeJavaHomeSet + boolean isRuntimeJavaHomeExplicitlySet = explicitRuntimeJavaHome.isPresent(); + Provider actualRuntimeJavaHome = isRuntimeJavaHomeExplicitlySet ? explicitRuntimeJavaHome : resolveJavaHomeFromToolChainService(VersionProperties.getBundledJdkMajorVersion()); GitInfo gitInfo = GitInfo.gitInfo(project.getRootDir()); - BuildParams.init(params -> { - params.reset(); - params.setRuntimeJavaHome(actualRuntimeJavaHome); - params.setJavaToolChainSpec(resolveToolchainSpecFromEnv()); - Provider runtimeJdkMetaData = actualRuntimeJavaHome.map( - runtimeJavaHome -> metadataDetector.getMetadata(getJavaInstallation(runtimeJavaHome)) - ); - params.setRuntimeJavaVersion( + Provider runtimeJdkMetaData = actualRuntimeJavaHome.map( + runtimeJavaHome -> metadataDetector.getMetadata(getJavaInstallation(runtimeJavaHome)) + ); + AtomicReference cache = new AtomicReference<>(); + Provider bwcVersionsProvider = providers.provider( + () -> cache.updateAndGet(val -> val == null ? resolveBwcVersions() : val) + ); + BuildParameterExtension buildParams = project.getExtensions() + .create( + "buildParams", + BuildParameterExtension.class, + actualRuntimeJavaHome, + resolveToolchainSpecFromEnv(), actualRuntimeJavaHome.map( javaHome -> determineJavaVersion( "runtime java.home", javaHome, - isExplicitRuntimeJavaHomeSet + isRuntimeJavaHomeExplicitlySet ? minimumRuntimeVersion : JavaVersion.toVersion(VersionProperties.getBundledJdkMajorVersion()) ) - ) - ); - params.setIsRuntimeJavaHomeSet(isExplicitRuntimeJavaHomeSet); - params.setRuntimeJavaDetails(runtimeJdkMetaData.map(m -> formatJavaVendorDetails(m))); - params.setJavaVersions(getAvailableJavaVersions()); - params.setMinimumCompilerVersion(minimumCompilerVersion); - params.setMinimumRuntimeVersion(minimumRuntimeVersion); - params.setGradleJavaVersion(Jvm.current().getJavaVersion()); - params.setGitRevision(gitInfo.getRevision()); - params.setGitOrigin(gitInfo.getOrigin()); - params.setBuildDate(ZonedDateTime.now(ZoneOffset.UTC)); - params.setTestSeed(getTestSeed()); - params.setIsCi( - System.getenv("JENKINS_URL") != null || System.getenv("BUILDKITE_BUILD_URL") != null || System.getProperty("isCI") != null - ); - params.setDefaultParallel(ParallelDetector.findDefaultParallel(project)); - params.setInFipsJvm(Util.getBooleanProperty("tests.fips.enabled", false)); - params.setIsSnapshotBuild(Util.getBooleanProperty("build.snapshot", true)); - AtomicReference cache = new AtomicReference<>(); - params.setBwcVersions( - providers.provider( - () -> cache.updateAndGet( - val -> val == null ? 
resolveBwcVersions(Util.locateElasticsearchWorkspace(project.getGradle())) : val - ) - ) + ), + isRuntimeJavaHomeExplicitlySet, + runtimeJdkMetaData.map(m -> formatJavaVendorDetails(m)), + getAvailableJavaVersions(), + minimumCompilerVersion, + minimumRuntimeVersion, + Jvm.current().getJavaVersion(), + gitInfo.getRevision(), + gitInfo.getOrigin(), + ZonedDateTime.now(ZoneOffset.UTC), + getTestSeed(), + System.getenv("JENKINS_URL") != null || System.getenv("BUILDKITE_BUILD_URL") != null || System.getProperty("isCI") != null, + ParallelDetector.findDefaultParallel(project), + Util.getBooleanProperty("build.snapshot", true), + bwcVersionsProvider ); + + project.getGradle().getSharedServices().registerIfAbsent("buildParams", BuildParameterService.class, spec -> { + // Provide some parameters + spec.getParameters().getBuildParams().set(buildParams); }); // Enforce the minimum compiler version @@ -155,7 +158,7 @@ public void apply(Project project) { // Print global build info header just before task execution // Only do this if we are the root build of a composite if (GradleUtils.isIncludedBuild(project) == false) { - project.getGradle().getTaskGraph().whenReady(graph -> logGlobalBuildInfo()); + project.getGradle().getTaskGraph().whenReady(graph -> logGlobalBuildInfo(buildParams)); } } @@ -180,9 +183,12 @@ private String formatJavaVendorDetails(JvmInstallationMetadata runtimeJdkMetaDat /* Introspect all versions of ES that may be tested against for backwards * compatibility. It is *super* important that this logic is the same as the * logic in VersionUtils.java. */ - private static BwcVersions resolveBwcVersions(File root) { - File versionsFile = new File(root, DEFAULT_VERSION_JAVA_FILE_PATH); - try (var is = new FileInputStream(versionsFile)) { + private BwcVersions resolveBwcVersions() { + String versionsFilePath = elvis( + System.getProperty("BWC_VERSION_SOURCE"), + new File(Util.locateElasticsearchWorkspace(project.getGradle()), DEFAULT_VERSION_JAVA_FILE_PATH).getPath() + ); + try (var is = new FileInputStream(versionsFilePath)) { List versionLines = IOUtils.readLines(is, "UTF-8"); return new BwcVersions(versionLines); } catch (IOException e) { @@ -190,7 +196,7 @@ private static BwcVersions resolveBwcVersions(File root) { } } - private void logGlobalBuildInfo() { + private void logGlobalBuildInfo(BuildParameterExtension buildParams) { final String osName = System.getProperty("os.name"); final String osVersion = System.getProperty("os.version"); final String osArch = System.getProperty("os.arch"); @@ -202,14 +208,14 @@ private void logGlobalBuildInfo() { LOGGER.quiet("Elasticsearch Build Hamster says Hello!"); LOGGER.quiet(" Gradle Version : " + GradleVersion.current().getVersion()); LOGGER.quiet(" OS Info : " + osName + " " + osVersion + " (" + osArch + ")"); - if (BuildParams.getIsRuntimeJavaHomeSet()) { - JvmInstallationMetadata runtimeJvm = metadataDetector.getMetadata(getJavaInstallation(BuildParams.getRuntimeJavaHome())); + if (buildParams.getIsRuntimeJavaHomeSet()) { + JvmInstallationMetadata runtimeJvm = metadataDetector.getMetadata(getJavaInstallation(buildParams.getRuntimeJavaHome().get())); final String runtimeJvmVendorDetails = runtimeJvm.getVendor().getDisplayName(); final String runtimeJvmImplementationVersion = runtimeJvm.getJvmVersion(); final String runtimeVersion = runtimeJvm.getRuntimeVersion(); final String runtimeExtraDetails = runtimeJvmVendorDetails + ", " + runtimeVersion; LOGGER.quiet(" Runtime JDK Version : " + runtimeJvmImplementationVersion + " (" + 
runtimeExtraDetails + ")"); - LOGGER.quiet(" Runtime java.home : " + BuildParams.getRuntimeJavaHome()); + LOGGER.quiet(" Runtime java.home : " + buildParams.getRuntimeJavaHome().get()); LOGGER.quiet(" Gradle JDK Version : " + gradleJvmImplementationVersion + " (" + gradleJvmVendorDetails + ")"); LOGGER.quiet(" Gradle java.home : " + gradleJvm.getJavaHome()); } else { @@ -220,8 +226,8 @@ private void logGlobalBuildInfo() { if (javaToolchainHome != null) { LOGGER.quiet(" JAVA_TOOLCHAIN_HOME : " + javaToolchainHome); } - LOGGER.quiet(" Random Testing Seed : " + BuildParams.getTestSeed()); - LOGGER.quiet(" In FIPS 140 mode : " + BuildParams.isInFipsJvm()); + LOGGER.quiet(" Random Testing Seed : " + buildParams.getTestSeed()); + LOGGER.quiet(" In FIPS 140 mode : " + buildParams.getInFipsJvm()); LOGGER.quiet("======================================="); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckstylePrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckstylePrecommitPlugin.java index 81ff081ffa82b..dbbe35905d208 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckstylePrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckstylePrecommitPlugin.java @@ -19,6 +19,7 @@ import org.gradle.api.plugins.quality.Checkstyle; import org.gradle.api.plugins.quality.CheckstyleExtension; import org.gradle.api.provider.Provider; +import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.SourceSetContainer; import org.gradle.api.tasks.TaskProvider; @@ -42,18 +43,23 @@ public TaskProvider createTask(Project project) { File checkstyleSuppressions = new File(checkstyleDir, "checkstyle_suppressions.xml"); File checkstyleConf = new File(checkstyleDir, "checkstyle.xml"); TaskProvider copyCheckstyleConf = project.getTasks().register("copyCheckstyleConf"); - // configure inputs and outputs so up to date works properly copyCheckstyleConf.configure(t -> t.getOutputs().files(checkstyleSuppressions, checkstyleConf)); if ("jar".equals(checkstyleConfUrl.getProtocol())) { try { JarURLConnection jarURLConnection = (JarURLConnection) checkstyleConfUrl.openConnection(); - copyCheckstyleConf.configure(t -> t.getInputs().file(jarURLConnection.getJarFileURL())); + copyCheckstyleConf.configure( + t -> t.getInputs().file(jarURLConnection.getJarFileURL()).withPathSensitivity(PathSensitivity.RELATIVE) + ); } catch (IOException e) { throw new UncheckedIOException(e); } } else if ("file".equals(checkstyleConfUrl.getProtocol())) { - copyCheckstyleConf.configure(t -> t.getInputs().files(checkstyleConfUrl.getFile(), checkstyleSuppressionsUrl.getFile())); + copyCheckstyleConf.configure( + t -> t.getInputs() + .files(checkstyleConfUrl.getFile(), checkstyleSuppressionsUrl.getFile()) + .withPathSensitivity(PathSensitivity.RELATIVE) + ); } // Explicitly using an Action interface as java lambdas diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/FilePermissionsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/FilePermissionsTask.java index a198034c3c09b..479b6f431b867 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/FilePermissionsTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/FilePermissionsTask.java @@ -19,6 +19,8 @@ import org.gradle.api.tasks.InputFiles; 
import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.OutputFile; +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.SkipWhenEmpty; import org.gradle.api.tasks.StopExecutionException; import org.gradle.api.tasks.TaskAction; @@ -79,6 +81,7 @@ private static boolean isExecutableFile(File file) { @InputFiles @IgnoreEmptyDirectories @SkipWhenEmpty + @PathSensitive(PathSensitivity.RELATIVE) public FileCollection getFiles() { return getSources().get() .stream() diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java index f71c86b19a140..f1ec236efe646 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java @@ -11,7 +11,7 @@ import org.elasticsearch.gradle.internal.ExportElasticsearchBuildResourcesTask; import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitPlugin; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.plugins.JavaBasePlugin; @@ -30,7 +30,7 @@ public class ForbiddenApisPrecommitPlugin extends PrecommitPlugin { @Override public TaskProvider createTask(Project project) { project.getPluginManager().apply(JavaBasePlugin.class); - + var buildParams = project.getRootProject().getExtensions().getByType(BuildParameterExtension.class); // Create a convenience task for all checks (this does not conflict with extension, as it has higher priority in DSL): var forbiddenTask = project.getTasks() .register(FORBIDDEN_APIS_TASK_NAME, task -> { task.setDescription("Runs forbidden-apis checks."); }); @@ -57,7 +57,7 @@ public TaskProvider createTask(Project project) { t.setClassesDirs(sourceSet.getOutput().getClassesDirs()); t.dependsOn(resourcesTask); t.setClasspath(sourceSet.getRuntimeClasspath().plus(sourceSet.getCompileClasspath())); - t.setTargetCompatibility(BuildParams.getMinimumRuntimeVersion().getMajorVersion()); + t.setTargetCompatibility(buildParams.getMinimumRuntimeVersion().getMajorVersion()); t.getBundledSignatures().set(BUNDLED_SIGNATURE_DEFAULTS); t.setSignaturesFiles( project.files( diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java index 80cece6074ab7..f70e25a57e331 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java @@ -12,17 +12,19 @@ import org.elasticsearch.gradle.dependencies.CompileOnlyResolvePlugin; import org.elasticsearch.gradle.internal.ExportElasticsearchBuildResourcesTask; import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitPlugin; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; import org.gradle.api.Project; import org.gradle.api.Task; import 
org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.component.ModuleComponentIdentifier; +import org.gradle.api.provider.Property; import org.gradle.api.tasks.TaskProvider; import java.io.File; import java.nio.file.Path; import static org.elasticsearch.gradle.internal.util.DependenciesUtils.createFileCollectionFromNonTransitiveArtifactsView; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; public class ThirdPartyAuditPrecommitPlugin extends PrecommitPlugin { @@ -31,10 +33,14 @@ public class ThirdPartyAuditPrecommitPlugin extends PrecommitPlugin { @Override public TaskProvider createTask(Project project) { + project.getRootProject().getPlugins().apply(CompileOnlyResolvePlugin.class); + Property buildParams = loadBuildParams(project); + project.getPlugins().apply(CompileOnlyResolvePlugin.class); project.getConfigurations().create("forbiddenApisCliJar"); project.getDependencies().add("forbiddenApisCliJar", "de.thetaphi:forbiddenapis:3.6"); Configuration jdkJarHellConfig = project.getConfigurations().create(JDK_JAR_HELL_CONFIG_NAME); + if (project.getPath().equals(LIBS_ELASTICSEARCH_CORE_PROJECT_PATH) == false) { // Internal projects are not all plugins, so make sure the check is available // we are not doing this for this project itself to avoid jar hell with itself @@ -66,9 +72,12 @@ public TaskProvider createTask(Project project) { && ((ModuleComponentIdentifier) identifier).getGroup().startsWith("org.elasticsearch") == false ) ); + if (buildParams.get().getIsRuntimeJavaHomeSet()) { + t.getRuntimeJavaVersion().set(buildParams.get().getRuntimeJavaVersion()); + } t.dependsOn(resourcesTask); - t.getTargetCompatibility().set(project.provider(BuildParams::getRuntimeJavaVersion)); - t.getJavaHome().set(project.provider(BuildParams::getRuntimeJavaHome).map(File::getPath)); + t.getTargetCompatibility().set(buildParams.flatMap(params -> params.getRuntimeJavaVersion())); + t.getJavaHome().set(buildParams.flatMap(params -> params.getRuntimeJavaHome()).map(File::getPath)); t.setSignatureFile(resourcesDir.resolve("forbidden/third-party-audit.txt").toFile()); t.getJdkJarHellClasspath().from(jdkJarHellConfig); t.getForbiddenAPIsClasspath().from(project.getConfigurations().getByName("forbiddenApisCliJar").plus(compileOnly)); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java index 7afee8acdd4d2..442797775de2f 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java @@ -13,7 +13,6 @@ import org.apache.commons.io.output.NullOutputStream; import org.elasticsearch.gradle.OS; import org.elasticsearch.gradle.VersionProperties; -import org.elasticsearch.gradle.internal.info.BuildParams; import org.gradle.api.DefaultTask; import org.gradle.api.JavaVersion; import org.gradle.api.file.ArchiveOperations; @@ -194,6 +193,10 @@ public Set getMissingClassExcludes() { @SkipWhenEmpty public abstract ConfigurableFileCollection getJarsToScan(); + @Input + @Optional + public abstract Property getRuntimeJavaVersion(); + @Classpath public FileCollection getClasspath() { return classpath; @@ -371,14 +374,10 @@ private String runForbiddenAPIsCli() throws IOException { /** Returns true iff the build Java version is the same as the given 
version. */ private boolean isJavaVersion(JavaVersion version) { - if (BuildParams.getIsRuntimeJavaHomeSet()) { - if (version.equals(BuildParams.getRuntimeJavaVersion())) { - return true; - } - } else if (version.getMajorVersion().equals(VersionProperties.getBundledJdkMajorVersion())) { - return true; + if (getRuntimeJavaVersion().isPresent()) { + return getRuntimeJavaVersion().get().equals(version); } - return false; + return version.getMajorVersion().equals(VersionProperties.getBundledJdkMajorVersion()); } private Set runJdkJarHellCheck() throws IOException { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/snyk/GenerateSnykDependencyGraph.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/snyk/GenerateSnykDependencyGraph.java index 31c6b503d7328..b19c1207d56fb 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/snyk/GenerateSnykDependencyGraph.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/snyk/GenerateSnykDependencyGraph.java @@ -11,7 +11,6 @@ import groovy.json.JsonOutput; -import org.elasticsearch.gradle.internal.info.BuildParams; import org.gradle.api.DefaultTask; import org.gradle.api.GradleException; import org.gradle.api.artifacts.Configuration; @@ -118,7 +117,7 @@ private Map> projectAttributesData() { } private Object buildTargetData() { - return Map.of("remoteUrl", remoteUrl.get(), "branch", BuildParams.getGitRevision()); + return Map.of("remoteUrl", remoteUrl.get(), "branch", getGitRevision().get()); } @InputFiles @@ -160,4 +159,9 @@ public Property getRemoteUrl() { public Property getTargetReference() { return targetReference; } + + @Input + public Property getGitRevision() { + return gitRevision; + } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/snyk/SnykDependencyMonitoringGradlePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/snyk/SnykDependencyMonitoringGradlePlugin.java index b3e3d7f7c004e..fa10daf8dfaaa 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/snyk/SnykDependencyMonitoringGradlePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/snyk/SnykDependencyMonitoringGradlePlugin.java @@ -10,17 +10,22 @@ package org.elasticsearch.gradle.internal.snyk; import org.elasticsearch.gradle.internal.conventions.info.GitInfo; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.artifacts.Configuration; import org.gradle.api.file.ProjectLayout; import org.gradle.api.plugins.JavaPlugin; import org.gradle.api.plugins.JavaPluginExtension; +import org.gradle.api.provider.Property; import org.gradle.api.provider.ProviderFactory; import org.gradle.api.tasks.SourceSet; import javax.inject.Inject; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; + public class SnykDependencyMonitoringGradlePlugin implements Plugin { public static final String UPLOAD_TASK_NAME = "uploadSnykDependencyGraph"; @@ -35,10 +40,14 @@ public SnykDependencyMonitoringGradlePlugin(ProjectLayout projectLayout, Provide @Override public void apply(Project project) { + project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); + Property buildParams = loadBuildParams(project); +
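Throughout this change, plugins obtain the new extension via ParamsUtils.loadBuildParams(project), whose implementation is not shown in this diff. Presumably it looks up the shared "buildParams" service registration created in GlobalBuildInfoPlugin, roughly along these lines (a sketch under that assumption, not the actual helper):

    import org.elasticsearch.gradle.internal.info.BuildParameterExtension;
    import org.elasticsearch.gradle.internal.info.BuildParameterService;
    import org.gradle.api.Project;
    import org.gradle.api.provider.Property;
    import org.gradle.api.services.BuildServiceRegistration;

    public class ParamsUtilsSketch {
        @SuppressWarnings("unchecked")
        public static Property<BuildParameterExtension> loadBuildParams(Project project) {
            // Fetch the registration made by GlobalBuildInfoPlugin and return the
            // BuildParameterExtension instance stored in its parameters.
            BuildServiceRegistration<BuildParameterService, BuildParameterService.Params> registration =
                (BuildServiceRegistration<BuildParameterService, BuildParameterService.Params>) project.getGradle()
                    .getSharedServices()
                    .getRegistrations()
                    .getByName("buildParams");
            return registration.getParameters().getBuildParams();
        }
    }

Callers such as the Snyk plugin here then read concrete values with buildParams.get().getGitRevision().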
var generateTaskProvider = project.getTasks() .register("generateSnykDependencyGraph", GenerateSnykDependencyGraph.class, generateSnykDependencyGraph -> { generateSnykDependencyGraph.getProjectPath().set(project.getPath()); generateSnykDependencyGraph.getProjectName().set(project.getName()); + generateSnykDependencyGraph.getGitRevision().set(buildParams.get().getGitRevision()); String projectVersion = project.getVersion().toString(); generateSnykDependencyGraph.getVersion().set(projectVersion); generateSnykDependencyGraph.getGradleVersion().set(project.getGradle().getGradleVersion()); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/ClusterFeaturesMetadataPlugin.java similarity index 83% rename from build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataPlugin.java rename to build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/ClusterFeaturesMetadataPlugin.java index be972f11d4586..0c8a99fa82398 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/ClusterFeaturesMetadataPlugin.java @@ -21,10 +21,10 @@ import java.util.Map; /** - * Extracts historical feature metadata into a machine-readable format for use in backward compatibility testing. + * Extracts cluster feature metadata into a machine-readable format for use in backward compatibility testing. */ -public class HistoricalFeaturesMetadataPlugin implements Plugin { - public static final String HISTORICAL_FEATURES_JSON = "historical-features.json"; +public class ClusterFeaturesMetadataPlugin implements Plugin { + public static final String CLUSTER_FEATURES_JSON = "cluster-features.json"; public static final String FEATURES_METADATA_TYPE = "features-metadata-json"; public static final String FEATURES_METADATA_CONFIGURATION = "featuresMetadata"; @@ -40,13 +40,13 @@ public void apply(Project project) { SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); SourceSet mainSourceSet = sourceSets.getByName(SourceSet.MAIN_SOURCE_SET_NAME); - TaskProvider generateTask = project.getTasks() - .register("generateHistoricalFeaturesMetadata", HistoricalFeaturesMetadataTask.class, task -> { + TaskProvider generateTask = project.getTasks() + .register("generateClusterFeaturesMetadata", ClusterFeaturesMetadataTask.class, task -> { task.setClasspath( featureMetadataExtractorConfig.plus(mainSourceSet.getRuntimeClasspath()) .plus(project.getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME)) ); - task.getOutputFile().convention(project.getLayout().getBuildDirectory().file(HISTORICAL_FEATURES_JSON)); + task.getOutputFile().convention(project.getLayout().getBuildDirectory().file(CLUSTER_FEATURES_JSON)); }); Configuration featuresMetadataArtifactConfig = project.getConfigurations().create(FEATURES_METADATA_CONFIGURATION, c -> { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/ClusterFeaturesMetadataTask.java similarity index 81% rename from build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataTask.java rename to
build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/ClusterFeaturesMetadataTask.java index a2ea7af210dfd..aa4f90e4d2367 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/HistoricalFeaturesMetadataTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/ClusterFeaturesMetadataTask.java @@ -26,7 +26,7 @@ import javax.inject.Inject; @CacheableTask -public abstract class HistoricalFeaturesMetadataTask extends DefaultTask { +public abstract class ClusterFeaturesMetadataTask extends DefaultTask { private FileCollection classpath; @OutputFile @@ -46,30 +46,30 @@ public void setClasspath(FileCollection classpath) { @TaskAction public void execute() { - getWorkerExecutor().noIsolation().submit(HistoricalFeaturesMetadataWorkAction.class, params -> { + getWorkerExecutor().noIsolation().submit(ClusterFeaturesMetadataWorkAction.class, params -> { params.getClasspath().setFrom(getClasspath()); params.getOutputFile().set(getOutputFile()); }); } - public interface HistoricalFeaturesWorkParameters extends WorkParameters { + public interface ClusterFeaturesWorkParameters extends WorkParameters { ConfigurableFileCollection getClasspath(); RegularFileProperty getOutputFile(); } - public abstract static class HistoricalFeaturesMetadataWorkAction implements WorkAction { + public abstract static class ClusterFeaturesMetadataWorkAction implements WorkAction { private final ExecOperations execOperations; @Inject - public HistoricalFeaturesMetadataWorkAction(ExecOperations execOperations) { + public ClusterFeaturesMetadataWorkAction(ExecOperations execOperations) { this.execOperations = execOperations; } @Override public void execute() { LoggedExec.javaexec(execOperations, spec -> { - spec.getMainClass().set("org.elasticsearch.extractor.features.HistoricalFeaturesMetadataExtractor"); + spec.getMainClass().set("org.elasticsearch.extractor.features.ClusterFeaturesMetadataExtractor"); spec.classpath(getParameters().getClasspath()); spec.args(getParameters().getOutputFile().get().getAsFile().getAbsolutePath()); }); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java index 8e7884888b63b..211718c151ba9 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java @@ -16,11 +16,11 @@ import org.elasticsearch.gradle.ElasticsearchDistributionType; import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; +import org.elasticsearch.gradle.internal.BwcVersions; import org.elasticsearch.gradle.internal.InternalDistributionDownloadPlugin; import org.elasticsearch.gradle.internal.JdkDownloadPlugin; import org.elasticsearch.gradle.internal.docker.DockerSupportPlugin; import org.elasticsearch.gradle.internal.docker.DockerSupportService; -import org.elasticsearch.gradle.internal.info.BuildParams; import org.elasticsearch.gradle.test.SystemPropertyCommandLineArgumentProvider; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.Action; @@ -51,9 +51,9 @@ import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER; import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_CLOUD_ESS; import static 
org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_IRONBANK; -import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_UBI; import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_WOLFI; import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.RPM; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; /** * This class defines gradle tasks for testing our various distribution artifacts. @@ -72,6 +72,7 @@ public void apply(Project project) { project.getPlugins().apply(InternalDistributionDownloadPlugin.class); project.getPlugins().apply(JdkDownloadPlugin.class); project.getPluginManager().apply("elasticsearch.java"); + var buildParams = loadBuildParams(project).get(); Provider dockerSupport = GradleUtils.getBuildService( project.getGradle().getSharedServices(), @@ -84,7 +85,7 @@ public void apply(Project project) { List testDistributions = configureDistributions(project); Map> lifecycleTasks = lifecycleTasks(project, "destructiveDistroTest"); - Map> versionTasks = versionTasks(project, "destructiveDistroUpgradeTest"); + Map> versionTasks = versionTasks(project, "destructiveDistroUpgradeTest", buildParams.getBwcVersions()); TaskProvider destructiveDistroTest = project.getTasks().register("destructiveDistroTest"); Configuration examplePlugin = configureExamplePlugin(project); @@ -115,7 +116,7 @@ public void apply(Project project) { lifecycleTask.configure(t -> t.dependsOn(destructiveTask)); if ((type == DEB || type == RPM) && distribution.getBundledJdk()) { - for (Version version : BuildParams.getBwcVersions().getIndexCompatible()) { + for (Version version : buildParams.getBwcVersions().getIndexCompatible()) { final ElasticsearchDistribution bwcDistro; if (version.equals(Version.fromString(distribution.getVersion()))) { // this is the same as the distribution we are testing @@ -146,7 +147,6 @@ public void apply(Project project) { private static Map> lifecycleTasks(Project project, String taskPrefix) { Map> lifecyleTasks = new HashMap<>(); lifecyleTasks.put(DOCKER, project.getTasks().register(taskPrefix + ".docker")); - lifecyleTasks.put(DOCKER_UBI, project.getTasks().register(taskPrefix + ".docker-ubi")); lifecyleTasks.put(DOCKER_IRONBANK, project.getTasks().register(taskPrefix + ".docker-ironbank")); lifecyleTasks.put(DOCKER_CLOUD_ESS, project.getTasks().register(taskPrefix + ".docker-cloud-ess")); lifecyleTasks.put(DOCKER_WOLFI, project.getTasks().register(taskPrefix + ".docker-wolfi")); @@ -156,10 +156,10 @@ private static Map> lifecycleTask return lifecyleTasks; } - private static Map> versionTasks(Project project, String taskPrefix) { + private static Map> versionTasks(Project project, String taskPrefix, BwcVersions bwcVersions) { Map> versionTasks = new HashMap<>(); - for (Version version : BuildParams.getBwcVersions().getIndexCompatible()) { + for (Version version : bwcVersions.getIndexCompatible()) { versionTasks.put(version.toString(), project.getTasks().register(taskPrefix + ".v" + version)); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/InternalClusterTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/InternalClusterTestPlugin.java index 3619c9c1ec76d..e13c2544ae9cf 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/InternalClusterTestPlugin.java +++ 
b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/InternalClusterTestPlugin.java @@ -9,7 +9,7 @@ package org.elasticsearch.gradle.internal.test; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.JavaVersion; import org.gradle.api.Plugin; @@ -18,16 +18,21 @@ import org.gradle.api.tasks.TaskProvider; import org.gradle.api.tasks.testing.Test; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; + public class InternalClusterTestPlugin implements Plugin { public static final String SOURCE_SET_NAME = "internalClusterTest"; @Override public void apply(Project project) { + project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); + var buildParams = loadBuildParams(project).get(); + TaskProvider internalClusterTest = GradleUtils.addTestSourceSet(project, SOURCE_SET_NAME); internalClusterTest.configure(task -> { // Set GC options to mirror defaults in jvm.options - if (BuildParams.getRuntimeJavaVersion().compareTo(JavaVersion.VERSION_14) < 0) { + if (buildParams.getRuntimeJavaVersion().get().compareTo(JavaVersion.VERSION_14) < 0) { task.jvmArgs("-XX:+UseConcMarkSweepGC", "-XX:CMSInitiatingOccupancyFraction=75", "-XX:+UseCMSInitiatingOccupancyOnly"); } else { task.jvmArgs("-XX:+UseG1GC"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/MutedTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/MutedTestPlugin.java index fddddbd14d3ab..c13a5f0e4d30d 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/MutedTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/MutedTestPlugin.java @@ -9,7 +9,7 @@ package org.elasticsearch.gradle.internal.test; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.file.RegularFile; @@ -19,6 +19,8 @@ import java.util.Arrays; import java.util.List; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; + public class MutedTestPlugin implements Plugin { private static final String ADDITIONAL_FILES_PROPERTY = "org.elasticsearch.additional.muted.tests"; @@ -32,6 +34,9 @@ public void apply(Project project) { .map(p -> project.getRootProject().getLayout().getProjectDirectory().file(p)) .toList(); + project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); + var buildParams = loadBuildParams(project).get(); + Provider mutedTestsProvider = project.getGradle() .getSharedServices() .registerIfAbsent("mutedTests", MutedTestsBuildService.class, spec -> { @@ -46,7 +51,7 @@ public void apply(Project project) { } // Don't fail when all tests are ignored when running in CI - filter.setFailOnNoMatchingTests(BuildParams.isCi() == false); + filter.setFailOnNoMatchingTests(buildParams.isCi() == false); }); }); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/MutedTestsBuildService.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/MutedTestsBuildService.java index 1dfa3bbb29aa2..df3d1c9b70a94 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/MutedTestsBuildService.java +++ 
b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/MutedTestsBuildService.java @@ -28,10 +28,12 @@ import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.Collections; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Set; public abstract class MutedTestsBuildService implements BuildService { - private final List excludePatterns = new ArrayList<>(); + private final Set excludePatterns = new LinkedHashSet<>(); private final ObjectMapper objectMapper = new ObjectMapper(new YAMLFactory()); public MutedTestsBuildService() { @@ -43,23 +45,23 @@ public MutedTestsBuildService() { } } - public List getExcludePatterns() { + public Set getExcludePatterns() { return excludePatterns; } - private List buildExcludePatterns(File file) { + private Set buildExcludePatterns(File file) { List mutedTests; try (InputStream is = new BufferedInputStream(new FileInputStream(file))) { mutedTests = objectMapper.readValue(is, MutedTests.class).getTests(); if (mutedTests == null) { - return Collections.emptyList(); + return Collections.emptySet(); } } catch (IOException e) { throw new UncheckedIOException(e); } - List excludes = new ArrayList<>(); + Set excludes = new LinkedHashSet<>(); if (mutedTests.isEmpty() == false) { for (MutedTestsBuildService.MutedTest mutedTest : mutedTests) { if (mutedTest.getClassName() != null && mutedTest.getMethods().isEmpty() == false) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithDependenciesPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithDependenciesPlugin.java index 487fe012a5941..a2851bfa2ae55 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithDependenciesPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithDependenciesPlugin.java @@ -10,11 +10,14 @@ package org.elasticsearch.gradle.internal.test; import org.apache.commons.lang.StringUtils; -import org.elasticsearch.gradle.plugin.PluginBuildPlugin; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.artifacts.Configuration; +import org.gradle.api.artifacts.Dependency; import org.gradle.api.artifacts.ProjectDependency; +import org.gradle.api.artifacts.dsl.DependencyHandler; +import org.gradle.api.attributes.Attribute; +import org.gradle.api.attributes.LibraryElements; import org.gradle.api.plugins.ExtraPropertiesExtension; import org.gradle.api.tasks.Copy; import org.gradle.api.tasks.SourceSetContainer; @@ -45,23 +48,31 @@ public void apply(final Project project) { Configuration testImplementationConfig = project.getConfigurations().getByName("testImplementation"); testImplementationConfig.getDependencies().all(dep -> { - if (dep instanceof ProjectDependency - && ((ProjectDependency) dep).getDependencyProject().getPlugins().hasPlugin(PluginBuildPlugin.class)) { - project.getGradle() - .projectsEvaluated(gradle -> addPluginResources(project, ((ProjectDependency) dep).getDependencyProject())); + if (dep instanceof ProjectDependency && dep.getGroup().contains("plugin")) { + addPluginResources(project, ((ProjectDependency) dep)); } }); } - private static void addPluginResources(final Project project, final Project pluginProject) { - final File outputDir = new File(project.getBuildDir(), "/generated-test-resources/" + pluginProject.getName()); - String camelProjectName = stream(pluginProject.getName().split("-")).map(t -> 
StringUtils.capitalize(t)) + private static void addPluginResources(final Project project, final ProjectDependency projectDependency) { + final File outputDir = new File(project.getBuildDir(), "/generated-test-resources/" + projectDependency.getName()); + String camelProjectName = stream(projectDependency.getName().split("-")).map(t -> StringUtils.capitalize(t)) .collect(Collectors.joining()); String taskName = "copy" + camelProjectName + "Metadata"; + String metadataConfiguration = "resolved" + camelProjectName + "Metadata"; + Configuration pluginMetadata = project.getConfigurations().maybeCreate(metadataConfiguration); + pluginMetadata.getAttributes().attribute(Attribute.of("pluginMetadata", Boolean.class), true); + pluginMetadata.getAttributes() + .attribute( + LibraryElements.LIBRARY_ELEMENTS_ATTRIBUTE, + project.getObjects().named(LibraryElements.class, LibraryElements.RESOURCES) + ); + DependencyHandler dependencyHandler = project.getDependencies(); + Dependency pluginMetadataDependency = dependencyHandler.project(Map.of("path", projectDependency.getPath())); + dependencyHandler.add(metadataConfiguration, pluginMetadataDependency); project.getTasks().register(taskName, Copy.class, copy -> { copy.into(outputDir); - copy.from(pluginProject.getTasks().named("pluginProperties")); - copy.from(pluginProject.file("src/main/plugin-metadata")); + copy.from(pluginMetadata); }); Map map = Map.of("builtBy", taskName); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java index 564465fbb2554..68711881b02f4 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java @@ -11,7 +11,7 @@ import org.elasticsearch.gradle.internal.ExportElasticsearchBuildResourcesTask; import org.elasticsearch.gradle.internal.conventions.util.Util; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; import org.elasticsearch.gradle.internal.precommit.FilePermissionsPrecommitPlugin; import org.elasticsearch.gradle.internal.precommit.ForbiddenPatternsPrecommitPlugin; import org.elasticsearch.gradle.internal.precommit.ForbiddenPatternsTask; @@ -35,6 +35,7 @@ public class TestWithSslPlugin implements Plugin { @Override public void apply(Project project) { File keyStoreDir = new File(project.getBuildDir(), "keystore"); + BuildParameterExtension buildParams = project.getRootProject().getExtensions().getByType(BuildParameterExtension.class); TaskProvider exportKeyStore = project.getTasks() .register("copyTestCertificates", ExportElasticsearchBuildResourcesTask.class, (t) -> { t.copy("test/ssl/test-client.crt"); @@ -87,7 +88,7 @@ public void apply(Project project) { .getExtensions() .getByName(TestClustersPlugin.EXTENSION_NAME); clusters.configureEach(c -> { - if (BuildParams.isInFipsJvm()) { + if (buildParams.getInFipsJvm()) { c.setting("xpack.security.transport.ssl.key", "test-node.key"); c.keystore("xpack.security.transport.ssl.secure_key_passphrase", "test-node-key-password"); c.setting("xpack.security.transport.ssl.certificate", "test-node.crt");
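The TestWithDependenciesPlugin change above stops reaching into the plugin project's task container and instead resolves plugin metadata through a configuration matched on attributes; the producing side is added to BasePluginBuildPlugin later in this patch. A condensed sketch of the exchange, for illustration only; the names pluginProject, testProject, resolvedFooMetadata and the path :plugins:foo are hypothetical:

    // marker attribute that producer and consumer must both carry
    Attribute<Boolean> PLUGIN_METADATA = Attribute.of("pluginMetadata", Boolean.class);

    // producer (the plugin project) exposes its metadata artifacts under the attribute
    Configuration out = pluginProject.getConfigurations().create("pluginMetadata");
    out.getAttributes().attribute(PLUGIN_METADATA, true);

    // consumer requests the same attribute and lets Gradle select the matching variant
    Configuration in = testProject.getConfigurations().maybeCreate("resolvedFooMetadata");
    in.getAttributes().attribute(PLUGIN_METADATA, true);
    testProject.getDependencies().add("resolvedFooMetadata",
        testProject.getDependencies().project(Map.of("path", ":plugins:foo")));

Because variant selection happens at resolution time, the consumer no longer needs the projectsEvaluated ordering against the plugin project that the removed code relied on.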
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java index 02309bb9c1811..6890cfb652952 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java @@ -24,6 +24,8 @@ import org.gradle.api.tasks.InputFiles; import org.gradle.api.tasks.Optional; import org.gradle.api.tasks.OutputDirectory; +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.SkipWhenEmpty; import org.gradle.api.tasks.TaskAction; import org.gradle.api.tasks.util.PatternFilterable; @@ -106,6 +108,7 @@ public Map getSubstitutions() { @SkipWhenEmpty @IgnoreEmptyDirectories @InputFiles + @PathSensitive(PathSensitivity.RELATIVE) public FileTree getInputDir() { FileTree coreFileTree = null; FileTree xpackFileTree = null; diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java index 777a6d931e50e..559c0f60abc08 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java @@ -20,9 +20,8 @@ import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes; import org.elasticsearch.gradle.internal.ElasticsearchJavaBasePlugin; import org.elasticsearch.gradle.internal.InternalDistributionDownloadPlugin; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.test.ClusterFeaturesMetadataPlugin; import org.elasticsearch.gradle.internal.test.ErrorReportingTestListener; -import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin; import org.elasticsearch.gradle.plugin.BasePluginBuildPlugin; import org.elasticsearch.gradle.plugin.PluginBuildPlugin; import org.elasticsearch.gradle.plugin.PluginPropertiesExtension; @@ -44,6 +43,7 @@ import org.gradle.api.file.ConfigurableFileCollection; import org.gradle.api.file.FileCollection; import org.gradle.api.file.FileTree; +import org.gradle.api.internal.artifacts.dependencies.ProjectDependencyInternal; import org.gradle.api.provider.ProviderFactory; import org.gradle.api.tasks.ClasspathNormalizer; import org.gradle.api.tasks.PathSensitivity; @@ -58,6 +58,8 @@ import javax.inject.Inject; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; + /** * Base plugin used for wiring up build tasks to REST testing tasks using new JUnit rule-based test clusters framework.
*/ @@ -92,6 +94,7 @@ public RestTestBasePlugin(ProviderFactory providerFactory) { public void apply(Project project) { project.getPluginManager().apply(ElasticsearchJavaBasePlugin.class); project.getPluginManager().apply(InternalDistributionDownloadPlugin.class); + var bwcVersions = loadBuildParams(project).get().getBwcVersions(); // Register integ-test and default distributions ElasticsearchDistribution defaultDistro = createDistribution( @@ -113,12 +116,12 @@ public void apply(Project project) { extractedPluginsConfiguration.extendsFrom(pluginsConfiguration); configureArtifactTransforms(project); - // Create configuration for aggregating historical feature metadata + // Create configuration for aggregating feature metadata FileCollection featureMetadataConfig = project.getConfigurations().create(FEATURES_METADATA_CONFIGURATION, c -> { c.setCanBeConsumed(false); c.setCanBeResolved(true); c.attributes( - a -> a.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, HistoricalFeaturesMetadataPlugin.FEATURES_METADATA_TYPE) + a -> a.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ClusterFeaturesMetadataPlugin.FEATURES_METADATA_TYPE) ); c.defaultDependencies(d -> d.add(project.getDependencies().project(Map.of("path", ":server")))); c.withDependencies(dependencies -> { @@ -133,10 +136,7 @@ public void apply(Project project) { c.setCanBeConsumed(false); c.setCanBeResolved(true); c.attributes( - a -> a.attribute( - ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, - HistoricalFeaturesMetadataPlugin.FEATURES_METADATA_TYPE - ) + a -> a.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ClusterFeaturesMetadataPlugin.FEATURES_METADATA_TYPE) ); c.defaultDependencies( d -> d.add(project.getDependencies().project(Map.of("path", ":distribution", "configuration", "featuresMetadata"))) @@ -176,7 +176,7 @@ public void apply(Project project) { task.systemProperty("tests.system_call_filter", "false"); // Pass minimum wire compatible version which is used by upgrade tests - task.systemProperty(MINIMUM_WIRE_COMPATIBLE_VERSION_SYSPROP, BuildParams.getBwcVersions().getMinimumWireCompatibleVersion()); + task.systemProperty(MINIMUM_WIRE_COMPATIBLE_VERSION_SYSPROP, bwcVersions.getMinimumWireCompatibleVersion()); // Register plugins and modules as task inputs and pass paths as system properties to tests var modulePath = project.getObjects().fileCollection().from(modulesConfiguration); @@ -223,7 +223,7 @@ public Void call(Object... args) { } Version version = (Version) args[0]; - boolean isReleased = BuildParams.getBwcVersions().unreleasedInfo(version) == null; + boolean isReleased = bwcVersions.unreleasedInfo(version) == null; String versionString = version.toString(); ElasticsearchDistribution bwcDistro = createDistribution(project, "bwc_" + versionString, versionString); @@ -235,9 +235,9 @@ public Void call(Object... 
args) { providerFactory.provider(() -> bwcDistro.getExtracted().getSingleFile().getPath()) ); - if (version.getMajor() > 0 && version.before(BuildParams.getBwcVersions().getMinimumWireCompatibleVersion())) { + if (version.getMajor() > 0 && version.before(bwcVersions.getMinimumWireCompatibleVersion())) { // If we are upgrade testing older versions we also need to upgrade to 7.last - this.call(BuildParams.getBwcVersions().getMinimumWireCompatibleVersion()); + this.call(bwcVersions.getMinimumWireCompatibleVersion()); } return null; } @@ -249,7 +249,7 @@ private void copyDependencies(Project project, DependencySet dependencies, Confi configuration.getDependencies() .stream() .filter(d -> d instanceof ProjectDependency) - .map(d -> project.getDependencies().project(Map.of("path", ((ProjectDependency) d).getDependencyProject().getPath()))) + .map(d -> project.getDependencies().project(Map.of("path", ((ProjectDependencyInternal) d).getPath()))) .forEach(dependencies::add); } @@ -326,8 +326,9 @@ private Configuration createPluginConfiguration(Project project, String name, bo Collection additionalDependencies = new LinkedHashSet<>(); for (Iterator iterator = dependencies.iterator(); iterator.hasNext();) { Dependency dependency = iterator.next(); + // this logic of relying on other projects metadata should probably live in a build service if (dependency instanceof ProjectDependency projectDependency) { - Project dependencyProject = projectDependency.getDependencyProject(); + Project dependencyProject = project.project(projectDependency.getPath()); List extendedPlugins = dependencyProject.getExtensions() .getByType(PluginPropertiesExtension.class) .getExtendedPlugins(); @@ -337,8 +338,8 @@ private Configuration createPluginConfiguration(Project project, String name, bo iterator.remove(); additionalDependencies.add( useExploded - ? getExplodedBundleDependency(project, dependencyProject.getPath()) - : getBundleZipTaskDependency(project, dependencyProject.getPath()) + ? 
getExplodedBundleDependency(project, projectDependency.getPath()) + : getBundleZipTaskDependency(project, projectDependency.getPath()) ); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java index fe305b8b46cf7..ca669276123b3 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java @@ -11,7 +11,8 @@ import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.internal.ElasticsearchJavaBasePlugin; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.internal.test.rest.CopyRestApiTask; import org.elasticsearch.gradle.internal.test.rest.CopyRestTestsTask; import org.elasticsearch.gradle.internal.test.rest.LegacyYamlRestTestPlugin; @@ -25,6 +26,7 @@ import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.Dependency; import org.gradle.api.file.Directory; +import org.gradle.api.file.FileCollection; import org.gradle.api.file.ProjectLayout; import org.gradle.api.file.RelativePath; import org.gradle.api.internal.file.FileOperations; @@ -47,6 +49,7 @@ import javax.inject.Inject; import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.setupYamlRestTestDependenciesDefaults; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; /** * Apply this plugin to run the YAML based REST tests from a prior major version against this version's cluster. 
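The recurring refactor in this patch swaps the static BuildParams accessors for a BuildParameterExtension obtained through the shared buildParams build service (via the ParamsUtils.loadBuildParams helper added later in this patch). A minimal sketch of the consuming pattern, assembled only from calls visible in these hunks; ExamplePlugin is a hypothetical name, not part of the patch:

    import org.elasticsearch.gradle.internal.info.BuildParameterExtension;
    import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin;
    import org.gradle.api.Plugin;
    import org.gradle.api.Project;
    import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams;

    public class ExamplePlugin implements Plugin<Project> { // hypothetical consumer
        @Override
        public void apply(Project project) {
            // the root project registers the shared "buildParams" service
            project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class);
            // lazy lookup replaces the removed static BuildParams accessors
            BuildParameterExtension buildParams = loadBuildParams(project).get();
            if (buildParams.isCi()) {
                // CI-only configuration goes here
            }
        }
    }

Resolving through a build service keeps the value lazy and shareable across projects, which is presumably why the static lookups are being removed.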
@@ -74,6 +77,8 @@ public AbstractYamlRestCompatTestPlugin(ProjectLayout projectLayout, FileOperati @Override public void apply(Project project) { + project.getRootProject().getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); + BuildParameterExtension buildParams = loadBuildParams(project).get(); final Path compatRestResourcesDir = Path.of("restResources").resolve("compat"); final Path compatSpecsDir = compatRestResourcesDir.resolve("yamlSpecs"); @@ -91,14 +96,14 @@ public void apply(Project project) { GradleUtils.extendSourceSet(project, YamlRestTestPlugin.YAML_REST_TEST, SOURCE_SET_NAME); // determine the previous rest compatibility version and BWC project path - int currentMajor = BuildParams.getBwcVersions().getCurrentVersion().getMajor(); - Version lastMinor = BuildParams.getBwcVersions() + int currentMajor = buildParams.getBwcVersions().getCurrentVersion().getMajor(); + Version lastMinor = buildParams.getBwcVersions() .getUnreleased() .stream() .filter(v -> v.getMajor() == currentMajor - 1) .min(Comparator.reverseOrder()) .get(); - String lastMinorProjectPath = BuildParams.getBwcVersions().unreleasedInfo(lastMinor).gradleProjectPath(); + String lastMinorProjectPath = buildParams.getBwcVersions().unreleasedInfo(lastMinor).gradleProjectPath(); // copy compatible rest specs Configuration bwcMinorConfig = project.getConfigurations().create(BWC_MINOR_CONFIG_NAME); @@ -240,10 +245,11 @@ public void apply(Project project) { yamlRestCompatTestTask.configure(testTask -> { testTask.systemProperty("tests.restCompat", true); // Use test runner and classpath from "normal" yaml source set + FileCollection outputFileCollection = yamlCompatTestSourceSet.getOutput(); testTask.setTestClassesDirs( yamlTestSourceSet.getOutput().getClassesDirs().plus(yamlCompatTestSourceSet.getOutput().getClassesDirs()) ); - testTask.onlyIf("Compatibility tests are available", t -> yamlCompatTestSourceSet.getOutput().isEmpty() == false); + testTask.onlyIf("Compatibility tests are available", t -> outputFileCollection.isEmpty() == false); testTask.setClasspath( yamlCompatTestSourceSet.getRuntimeClasspath() // remove the "normal" api and tests diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java index ef93dafa913cd..ba242a8e23861 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java @@ -137,7 +137,7 @@ public void skipTest(String fullTestName, String reason) { // However, the folder can be arbitrarily nested, so a == a1/a2/a3, and the test name can include forward slashes, so c == c1/c2/c3 // So we also need to support a1/a2/a3/b/c1/c2/c3 - String[] testParts = fullTestName.split("/"); + String[] testParts = fullTestName.split("/", 3); if (testParts.length < 3) { throw new IllegalArgumentException( "To skip tests, all 3 parts [folder/file/test name] must be defined. found [" + fullTestName + "]"
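The limit argument added above caps the split at three elements, so forward slashes after the second separator stay inside the test-name component. An illustration of standard java.lang.String semantics, not part of the patch:

    String full = "folder/file/test/name/with/slashes";
    String[] parts = full.split("/", 3);
    // parts == ["folder", "file", "test/name/with/slashes"]
    // the unlimited split("/") returned six elements for this input, so a test
    // name containing slashes was broken apart before the length check ran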
found [" + fullTestName + "]" diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesDeployPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesDeployPlugin.java index 53fb4c61e151c..a934164d11af6 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesDeployPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesDeployPlugin.java @@ -12,7 +12,7 @@ import org.apache.commons.lang.StringUtils; import org.elasticsearch.gradle.Architecture; import org.elasticsearch.gradle.internal.docker.DockerBuildTask; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -20,6 +20,8 @@ import java.util.Arrays; import java.util.List; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; + public class TestFixturesDeployPlugin implements Plugin { public static final String DEPLOY_FIXTURE_TASK_NAME = "deployFixtureDockerImages"; @@ -27,13 +29,19 @@ public class TestFixturesDeployPlugin implements Plugin { @Override public void apply(Project project) { + project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); + var buildParams = loadBuildParams(project).get(); NamedDomainObjectContainer fixtures = project.container(TestFixtureDeployment.class); project.getExtensions().add("dockerFixtures", fixtures); - registerDeployTaskPerFixture(project, fixtures); + registerDeployTaskPerFixture(project, fixtures, buildParams.isCi()); project.getTasks().register(DEPLOY_FIXTURE_TASK_NAME, task -> task.dependsOn(project.getTasks().withType(DockerBuildTask.class))); } - private static void registerDeployTaskPerFixture(Project project, NamedDomainObjectContainer fixtures) { + private static void registerDeployTaskPerFixture( + Project project, + NamedDomainObjectContainer fixtures, + boolean isCi + ) { fixtures.all( fixture -> project.getTasks() .register("deploy" + StringUtils.capitalize(fixture.getName()) + "DockerImage", DockerBuildTask.class, task -> { @@ -42,12 +50,12 @@ private static void registerDeployTaskPerFixture(Project project, NamedDomainObj if (baseImages.isEmpty() == false) { task.setBaseImages(baseImages.toArray(new String[baseImages.size()])); } - task.setNoCache(BuildParams.isCi()); + task.setNoCache(isCi); task.setTags( new String[] { resolveTargetDockerRegistry(fixture) + "/" + fixture.getName() + "-fixture:" + fixture.getVersion().get() } ); - task.getPush().set(BuildParams.isCi()); + task.getPush().set(isCi); task.getPlatforms().addAll(Arrays.stream(Architecture.values()).map(a -> a.dockerPlatform).toList()); task.setGroup("Deploy TestFixtures"); task.setDescription("Deploys the " + fixture.getName() + " test fixture"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java index 504b081fd505d..ab28a66d93065 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java @@ -17,7 +17,7 @@ import 
org.elasticsearch.gradle.internal.docker.DockerSupportPlugin; import org.elasticsearch.gradle.internal.docker.DockerSupportService; -import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.test.SystemPropertyCommandLineArgumentProvider; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.Action; @@ -47,6 +47,8 @@ import javax.inject.Inject; +import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; + public class TestFixturesPlugin implements Plugin { private static final Logger LOGGER = Logging.getLogger(TestFixturesPlugin.class); @@ -68,6 +70,8 @@ protected FileSystemOperations getFileSystemOperations() { @Override public void apply(Project project) { project.getRootProject().getPluginManager().apply(DockerSupportPlugin.class); + project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class); + var buildParams = loadBuildParams(project).get(); TaskContainer tasks = project.getTasks(); Provider dockerComposeThrottle = project.getGradle() @@ -127,7 +131,7 @@ public void apply(Project project) { tasks.withType(ComposeUp.class).named("composeUp").configure(t -> { // Avoid running docker-compose tasks in parallel in CI due to some issues on certain Linux distributions - if (BuildParams.isCi()) { + if (buildParams.isCi()) { t.usesService(dockerComposeThrottle); t.usesService(dockerSupport); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/AbstractCustomJavaToolchainResolver.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/AbstractCustomJavaToolchainResolver.java index ac458a632e818..0c6a6bc26156b 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/AbstractCustomJavaToolchainResolver.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/AbstractCustomJavaToolchainResolver.java @@ -34,6 +34,7 @@ static String toArchString(Architecture architecture) { case X86_64 -> "x64"; case AARCH64 -> "aarch64"; case X86 -> "x86"; + default -> throw new UnsupportedOperationException("Architecture " + architecture); }; } diff --git a/server/src/main/java/org/elasticsearch/inference/ChunkingOptions.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/CiUtils.java similarity index 65% rename from server/src/main/java/org/elasticsearch/inference/ChunkingOptions.java rename to build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/CiUtils.java index 5953e2cb44ebf..1b019a6cbd3e6 100644 --- a/server/src/main/java/org/elasticsearch/inference/ChunkingOptions.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/CiUtils.java @@ -7,13 +7,12 @@ * License v3.0 only", or the "Server Side Public License, v 1". 
*/ -package org.elasticsearch.inference; +package org.elasticsearch.gradle.internal.util; -import org.elasticsearch.core.Nullable; +public class CiUtils { -public record ChunkingOptions(@Nullable Integer windowSize, @Nullable Integer span) { - - public boolean settingsArePresent() { - return windowSize != null || span != null; + static String safeName(String input) { + return input.replaceAll("[^a-zA-Z0-9_\\-\\.]+", " ").trim().replaceAll(" ", "_").toLowerCase(); } + } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/ParamsUtils.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/ParamsUtils.java new file mode 100644 index 0000000000000..0afe654bc5fbc --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/ParamsUtils.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.gradle.internal.util; + +import org.elasticsearch.gradle.internal.info.BuildParameterExtension; +import org.elasticsearch.gradle.internal.info.BuildParameterService; +import org.gradle.api.Project; +import org.gradle.api.provider.Property; +import org.gradle.api.services.BuildServiceRegistration; + +public class ParamsUtils { + + public static Property loadBuildParams(Project project) { + BuildServiceRegistration buildParamsRegistrations = (BuildServiceRegistration< + BuildParameterService, + BuildParameterService.Params>) project.getGradle().getSharedServices().getRegistrations().getByName("buildParams"); + Property buildParams = buildParamsRegistrations.getParameters().getBuildParams(); + return buildParams; + } + +} diff --git a/build-tools-internal/src/main/resources/checkstyle.xml b/build-tools-internal/src/main/resources/checkstyle.xml index daedc2ac3c629..9ed31d993909e 100644 --- a/build-tools-internal/src/main/resources/checkstyle.xml +++ b/build-tools-internal/src/main/resources/checkstyle.xml @@ -57,7 +57,7 @@ unfair. 
--> - + diff --git a/build-tools-internal/src/main/resources/fips_java.policy b/build-tools-internal/src/main/resources/fips_java.policy index c259b0bc908d8..781e1247db7a5 100644 --- a/build-tools-internal/src/main/resources/fips_java.policy +++ b/build-tools-internal/src/main/resources/fips_java.policy @@ -5,6 +5,7 @@ grant { permission java.security.SecurityPermission "getProperty.jdk.tls.disabledAlgorithms"; permission java.security.SecurityPermission "getProperty.jdk.certpath.disabledAlgorithms"; permission java.security.SecurityPermission "getProperty.jdk.tls.server.defaultDHEParameters"; + permission java.security.SecurityPermission "getProperty.org.bouncycastle.ec.max_f2m_field_size"; permission java.lang.RuntimePermission "getProtectionDomain"; permission java.util.PropertyPermission "java.runtime.name", "read"; permission org.bouncycastle.crypto.CryptoServicesPermission "tlsAlgorithmsEnabled"; @@ -20,6 +21,6 @@ grant { }; // rely on the caller's socket permissions, the JSSE TLS implementation here is always allowed to connect -grant codeBase "file:${jdk.module.path}/bctls-fips-1.0.17.jar" { +grant codeBase "file:${jdk.module.path}/bctls-fips-1.0.19.jar" { permission java.net.SocketPermission "*", "connect"; }; diff --git a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt index 5388f942be8d7..a9da7995c2b36 100644 --- a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt +++ b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt @@ -119,7 +119,7 @@ java.time.zone.ZoneRules#getStandardOffset(java.time.Instant) java.time.zone.ZoneRules#getDaylightSavings(java.time.Instant) java.time.zone.ZoneRules#isDaylightSavings(java.time.Instant) -@defaultMessage Use logger methods with non-Object parameter +@defaultMessage The first parameter to a log4j log statement should be a String, a log4j Supplier (not java.util.function.Supplier), or another object that log4j supports. 
org.apache.logging.log4j.Logger#trace(java.lang.Object) org.apache.logging.log4j.Logger#trace(java.lang.Object, java.lang.Throwable) org.apache.logging.log4j.Logger#debug(java.lang.Object) diff --git a/build-tools-internal/src/main/resources/minimumGradleVersion b/build-tools-internal/src/main/resources/minimumGradleVersion index dd78a707858a7..876e3136ea819 100644 --- a/build-tools-internal/src/main/resources/minimumGradleVersion +++ b/build-tools-internal/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -8.10.2 \ No newline at end of file +8.11.1 \ No newline at end of file diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy index c5b7a44a19d31..9c7d20d84a670 100644 --- a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy +++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy @@ -17,232 +17,201 @@ import org.elasticsearch.gradle.internal.BwcVersions.UnreleasedVersionInfo class BwcVersionsSpec extends Specification { List versionLines = [] - def "current version is next major with last minor staged"() { - given: - addVersion('7.14.0', '8.9.0') - addVersion('7.14.1', '8.9.0') - addVersion('7.14.2', '8.9.0') - addVersion('7.15.0', '8.9.0') - addVersion('7.15.1', '8.9.0') - addVersion('7.15.2', '8.9.0') - addVersion('7.16.0', '8.10.0') - addVersion('7.16.1', '8.10.0') - addVersion('7.16.2', '8.10.0') - addVersion('7.17.0', '8.10.0') - addVersion('8.0.0', '9.0.0') - addVersion('8.1.0', '9.0.0') - - when: - def bwc = new BwcVersions(versionLines, v('8.1.0')) - def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] } - - then: - unreleased == [ - (v('7.16.2')): new UnreleasedVersionInfo(v('7.16.2'), '7.16', ':distribution:bwc:bugfix'), - (v('7.17.0')): new UnreleasedVersionInfo(v('7.17.0'), '7.17', ':distribution:bwc:staged'), - (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), '8.x', ':distribution:bwc:minor'), - (v('8.1.0')): new UnreleasedVersionInfo(v('8.1.0'), 'main', ':distribution') - ] - bwc.wireCompatible == [v('7.17.0'), v('8.0.0'), v('8.1.0')] - bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.16.2'), v('7.17.0'), v('8.0.0'), v('8.1.0')] - bwc.minimumWireCompatibleVersion == v('7.17.0') - } - def "current version is next minor with next major and last minor both staged"() { given: - addVersion('7.14.0', '8.9.0') - addVersion('7.14.1', '8.9.0') - addVersion('7.14.2', '8.9.0') - addVersion('7.15.0', '8.9.0') - addVersion('7.15.1', '8.9.0') - addVersion('7.15.2', '8.9.0') - addVersion('7.16.0', '8.10.0') - addVersion('7.16.1', '8.10.0') - addVersion('7.17.0', '8.10.0') - addVersion('8.0.0', '9.0.0') - addVersion('8.1.0', '9.1.0') + addVersion('8.14.0', '9.9.0') + addVersion('8.14.1', '9.9.0') + addVersion('8.14.2', '9.9.0') + addVersion('8.15.0', '9.9.0') + addVersion('8.15.1', '9.9.0') + addVersion('8.15.2', '9.9.0') + addVersion('8.16.0', '9.10.0') + addVersion('8.16.1', '9.10.0') + addVersion('8.17.0', '9.10.0') + addVersion('9.0.0', '10.0.0') + addVersion('9.1.0', '10.1.0') when: - def bwc = new BwcVersions(versionLines, v('8.1.0')) + def bwc = new BwcVersions(versionLines, v('9.1.0')) def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] } then: unreleased == [ - (v('7.16.1')): new 
UnreleasedVersionInfo(v('7.16.1'), '7.16', ':distribution:bwc:bugfix'), - (v('7.17.0')): new UnreleasedVersionInfo(v('7.17.0'), '7.17', ':distribution:bwc:staged'), - (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), '8.x', ':distribution:bwc:minor'), - (v('8.1.0')): new UnreleasedVersionInfo(v('8.1.0'), 'main', ':distribution') + (v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution:bwc:bugfix'), + (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.17', ':distribution:bwc:staged'), + (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), '9.x', ':distribution:bwc:minor'), + (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), 'main', ':distribution') ] - bwc.wireCompatible == [v('7.17.0'), v('8.0.0'), v('8.1.0')] - bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('8.0.0'), v('8.1.0')] + bwc.wireCompatible == [v('8.17.0'), v('9.0.0'), v('9.1.0')] + bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('9.0.0'), v('9.1.0')] } def "current is next minor with upcoming minor staged"() { given: - addVersion('7.14.0', '8.9.0') - addVersion('7.14.1', '8.9.0') - addVersion('7.14.2', '8.9.0') - addVersion('7.15.0', '8.9.0') - addVersion('7.15.1', '8.9.0') - addVersion('7.15.2', '8.9.0') - addVersion('7.16.0', '8.10.0') - addVersion('7.16.1', '8.10.0') - addVersion('7.17.0', '8.10.0') - addVersion('7.17.1', '8.10.0') - addVersion('8.0.0', '9.0.0') - addVersion('8.1.0', '9.1.0') + addVersion('8.14.0', '9.9.0') + addVersion('8.14.1', '9.9.0') + addVersion('8.14.2', '9.9.0') + addVersion('8.15.0', '9.9.0') + addVersion('8.15.1', '9.9.0') + addVersion('8.15.2', '9.9.0') + addVersion('8.16.0', '9.10.0') + addVersion('8.16.1', '9.10.0') + addVersion('8.17.0', '9.10.0') + addVersion('8.17.1', '9.10.0') + addVersion('9.0.0', '10.0.0') + addVersion('9.1.0', '10.1.0') when: - def bwc = new BwcVersions(versionLines, v('8.1.0')) + def bwc = new BwcVersions(versionLines, v('9.1.0')) def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] } then: unreleased == [ - (v('7.17.1')): new UnreleasedVersionInfo(v('7.17.1'), '7.17', ':distribution:bwc:bugfix'), - (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), '8.0', ':distribution:bwc:staged'), - (v('8.1.0')): new UnreleasedVersionInfo(v('8.1.0'), 'main', ':distribution') + (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:bugfix'), + (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), '9.0', ':distribution:bwc:staged'), + (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), 'main', ':distribution') ] - bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.1.0')] - bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.1.0')] + bwc.wireCompatible == [v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.1.0')] + bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.1.0')] } def "current version is staged major"() { given: - addVersion('7.14.0', '8.9.0') - addVersion('7.14.1', '8.9.0') - addVersion('7.14.2', '8.9.0') - addVersion('7.15.0', '8.9.0') - addVersion('7.15.1', '8.9.0') - addVersion('7.15.2', '8.9.0') - addVersion('7.16.0', '8.10.0') - addVersion('7.16.1', '8.10.0') - 
addVersion('7.17.0', '8.10.0') - addVersion('7.17.1', '8.10.0') - addVersion('8.0.0', '9.0.0') + addVersion('8.14.0', '9.9.0') + addVersion('8.14.1', '9.9.0') + addVersion('8.14.2', '9.9.0') + addVersion('8.15.0', '9.9.0') + addVersion('8.15.1', '9.9.0') + addVersion('8.15.2', '9.9.0') + addVersion('8.16.0', '9.10.0') + addVersion('8.16.1', '9.10.0') + addVersion('8.17.0', '9.10.0') + addVersion('8.17.1', '9.10.0') + addVersion('9.0.0', '10.0.0') when: - def bwc = new BwcVersions(versionLines, v('8.0.0')) + def bwc = new BwcVersions(versionLines, v('9.0.0')) def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] } then: unreleased == [ - (v('7.17.1')): new UnreleasedVersionInfo(v('7.17.1'), '7.17', ':distribution:bwc:bugfix'), - (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), 'main', ':distribution'), + (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:bugfix'), + (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), 'main', ':distribution'), ] - bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.0.0')] - bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0')] + bwc.wireCompatible == [v('8.17.0'), v('8.17.1'), v('9.0.0')] + bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('9.0.0')] } def "current version is major with unreleased next minor"() { given: - addVersion('7.14.0', '8.9.0') - addVersion('7.14.1', '8.9.0') - addVersion('7.14.2', '8.9.0') - addVersion('7.15.0', '8.9.0') - addVersion('7.15.1', '8.9.0') - addVersion('7.15.2', '8.9.0') - addVersion('7.16.0', '8.10.0') - addVersion('7.16.1', '8.10.0') - addVersion('7.17.0', '8.10.0') - addVersion('8.0.0', '9.0.0') + addVersion('8.14.0', '9.9.0') + addVersion('8.14.1', '9.9.0') + addVersion('8.14.2', '9.9.0') + addVersion('8.15.0', '9.9.0') + addVersion('8.15.1', '9.9.0') + addVersion('8.15.2', '9.9.0') + addVersion('8.16.0', '9.10.0') + addVersion('8.16.1', '9.10.0') + addVersion('8.17.0', '9.10.0') + addVersion('9.0.0', '10.0.0') when: - def bwc = new BwcVersions(versionLines, v('8.0.0')) + def bwc = new BwcVersions(versionLines, v('9.0.0')) def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] } then: unreleased == [ - (v('7.16.1')): new UnreleasedVersionInfo(v('7.16.1'), '7.16', ':distribution:bwc:bugfix'), - (v('7.17.0')): new UnreleasedVersionInfo(v('7.17.0'), '7.x', ':distribution:bwc:minor'), - (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), 'main', ':distribution'), + (v('8.16.1')): new UnreleasedVersionInfo(v('8.16.1'), '8.16', ':distribution:bwc:bugfix'), + (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.x', ':distribution:bwc:minor'), + (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), 'main', ':distribution'), ] - bwc.wireCompatible == [v('7.17.0'), v('8.0.0')] - bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('8.0.0')] + bwc.wireCompatible == [v('8.17.0'), v('9.0.0')] + bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('9.0.0')] } def "current version is major with staged next minor"() { given: - addVersion('7.14.0', '8.9.0') - addVersion('7.14.1', '8.9.0') - addVersion('7.14.2', '8.9.0') - addVersion('7.15.0', '8.9.0') - 
addVersion('7.15.1', '8.9.0') - addVersion('7.15.2', '8.9.0') - addVersion('7.16.0', '8.10.0') - addVersion('7.17.0', '8.10.0') - addVersion('8.0.0', '9.0.0') + addVersion('8.14.0', '9.9.0') + addVersion('8.14.1', '9.9.0') + addVersion('8.14.2', '9.9.0') + addVersion('8.15.0', '9.9.0') + addVersion('8.15.1', '9.9.0') + addVersion('8.15.2', '9.9.0') + addVersion('8.16.0', '9.10.0') + addVersion('8.17.0', '9.10.0') + addVersion('9.0.0', '10.0.0') when: - def bwc = new BwcVersions(versionLines, v('8.0.0')) + def bwc = new BwcVersions(versionLines, v('9.0.0')) def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] } then: unreleased == [ - (v('7.15.2')): new UnreleasedVersionInfo(v('7.15.2'), '7.15', ':distribution:bwc:bugfix'), - (v('7.16.0')): new UnreleasedVersionInfo(v('7.16.0'), '7.16', ':distribution:bwc:staged'), - (v('7.17.0')): new UnreleasedVersionInfo(v('7.17.0'), '7.x', ':distribution:bwc:minor'), - (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), 'main', ':distribution'), + (v('8.15.2')): new UnreleasedVersionInfo(v('8.15.2'), '8.15', ':distribution:bwc:bugfix'), + (v('8.16.0')): new UnreleasedVersionInfo(v('8.16.0'), '8.16', ':distribution:bwc:staged'), + (v('8.17.0')): new UnreleasedVersionInfo(v('8.17.0'), '8.x', ':distribution:bwc:minor'), + (v('9.0.0')): new UnreleasedVersionInfo(v('9.0.0'), 'main', ':distribution'), ] - bwc.wireCompatible == [v('7.17.0'), v('8.0.0')] - bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.17.0'), v('8.0.0')] + bwc.wireCompatible == [v('8.17.0'), v('9.0.0')] + bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.17.0'), v('9.0.0')] } def "current version is next bugfix"() { given: - addVersion('7.14.0', '8.9.0') - addVersion('7.14.1', '8.9.0') - addVersion('7.14.2', '8.9.0') - addVersion('7.15.0', '8.9.0') - addVersion('7.15.1', '8.9.0') - addVersion('7.15.2', '8.9.0') - addVersion('7.16.0', '8.10.0') - addVersion('7.16.1', '8.10.0') - addVersion('7.17.0', '8.10.0') - addVersion('7.17.1', '8.10.0') - addVersion('8.0.0', '9.0.0') - addVersion('8.0.1', '9.0.0') + addVersion('8.14.0', '9.9.0') + addVersion('8.14.1', '9.9.0') + addVersion('8.14.2', '9.9.0') + addVersion('8.15.0', '9.9.0') + addVersion('8.15.1', '9.9.0') + addVersion('8.15.2', '9.9.0') + addVersion('8.16.0', '9.10.0') + addVersion('8.16.1', '9.10.0') + addVersion('8.17.0', '9.10.0') + addVersion('8.17.1', '9.10.0') + addVersion('9.0.0', '10.0.0') + addVersion('9.0.1', '10.0.0') when: - def bwc = new BwcVersions(versionLines, v('8.0.1')) + def bwc = new BwcVersions(versionLines, v('9.0.1')) def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] } then: unreleased == [ - (v('7.17.1')): new UnreleasedVersionInfo(v('7.17.1'), '7.17', ':distribution:bwc:maintenance'), - (v('8.0.1')): new UnreleasedVersionInfo(v('8.0.1'), 'main', ':distribution'), + (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:maintenance'), + (v('9.0.1')): new UnreleasedVersionInfo(v('9.0.1'), 'main', ':distribution'), ] - bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.0.1')] - bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.0.1')] + bwc.wireCompatible == [v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.0.1')] + bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), 
v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.0.1')] } def "current version is next minor with no staged releases"() { given: - addVersion('7.14.0', '8.9.0') - addVersion('7.14.1', '8.9.0') - addVersion('7.14.2', '8.9.0') - addVersion('7.15.0', '8.9.0') - addVersion('7.15.1', '8.9.0') - addVersion('7.15.2', '8.9.0') - addVersion('7.16.0', '8.10.0') - addVersion('7.16.1', '8.10.0') - addVersion('7.17.0', '8.10.0') - addVersion('7.17.1', '8.10.0') - addVersion('8.0.0', '9.0.0') - addVersion('8.0.1', '9.0.0') - addVersion('8.1.0', '9.1.0') + addVersion('8.14.0', '9.9.0') + addVersion('8.14.1', '9.9.0') + addVersion('8.14.2', '9.9.0') + addVersion('8.15.0', '9.9.0') + addVersion('8.15.1', '9.9.0') + addVersion('8.15.2', '9.9.0') + addVersion('8.16.0', '9.10.0') + addVersion('8.16.1', '9.10.0') + addVersion('8.17.0', '9.10.0') + addVersion('8.17.1', '9.10.0') + addVersion('9.0.0', '10.0.0') + addVersion('9.0.1', '10.0.0') + addVersion('9.1.0', '10.1.0') when: - def bwc = new BwcVersions(versionLines, v('8.1.0')) + def bwc = new BwcVersions(versionLines, v('9.1.0')) def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] } then: unreleased == [ - (v('7.17.1')): new UnreleasedVersionInfo(v('7.17.1'), '7.17', ':distribution:bwc:maintenance'), - (v('8.0.1')): new UnreleasedVersionInfo(v('8.0.1'), '8.0', ':distribution:bwc:bugfix'), - (v('8.1.0')): new UnreleasedVersionInfo(v('8.1.0'), 'main', ':distribution') + (v('8.17.1')): new UnreleasedVersionInfo(v('8.17.1'), '8.17', ':distribution:bwc:maintenance'), + (v('9.0.1')): new UnreleasedVersionInfo(v('9.0.1'), '9.0', ':distribution:bwc:bugfix'), + (v('9.1.0')): new UnreleasedVersionInfo(v('9.1.0'), 'main', ':distribution') ] - bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.0.1'), v('8.1.0')] - bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.0.1'), v('8.1.0')] + bwc.wireCompatible == [v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.0.1'), v('9.1.0')] + bwc.indexCompatible == [v('8.14.0'), v('8.14.1'), v('8.14.2'), v('8.15.0'), v('8.15.1'), v('8.15.2'), v('8.16.0'), v('8.16.1'), v('8.17.0'), v('8.17.1'), v('9.0.0'), v('9.0.1'), v('9.1.0')] } private void addVersion(String elasticsearch, String lucene) { diff --git a/build-tools/build.gradle b/build-tools/build.gradle index 7fd01f0c3d4f7..e457999fedfee 100644 --- a/build-tools/build.gradle +++ b/build-tools/build.gradle @@ -9,9 +9,6 @@ buildscript { repositories { - maven { - url 'https://jitpack.io' - } mavenCentral() } } @@ -117,9 +114,6 @@ configurations { } repositories { - maven { - url 'https://jitpack.io' - } mavenCentral() gradlePluginPortal() } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/LazyFileOutputStream.java b/build-tools/src/main/java/org/elasticsearch/gradle/LazyFileOutputStream.java index 2f5b110fc59a9..c3da389fc30d4 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/LazyFileOutputStream.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/LazyFileOutputStream.java @@ -39,6 +39,12 @@ public void write(byte b[], int off, int len) throws IOException { bootstrap(); delegate.write(b, off, len); } + + @Override + public void write(byte b[]) throws IOException { + bootstrap(); + delegate.write(b); + } }; } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/LoggedExec.java 
b/build-tools/src/main/java/org/elasticsearch/gradle/LoggedExec.java index 505e9a5b114d1..28018b4c50abe 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/LoggedExec.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/LoggedExec.java @@ -20,6 +20,7 @@ import org.gradle.api.provider.Property; import org.gradle.api.provider.Provider; import org.gradle.api.provider.ProviderFactory; +import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.Optional; @@ -53,6 +54,7 @@ * Exec task implementation. */ @SuppressWarnings("unchecked") +@CacheableTask public abstract class LoggedExec extends DefaultTask implements FileSystemOperationsAware { private static final Logger LOGGER = Logging.getLogger(LoggedExec.class); @@ -87,6 +89,14 @@ public abstract class LoggedExec extends DefaultTask implements FileSystemOperat abstract public Property getCaptureOutput(); @Input + public Provider getWorkingDirPath() { + return getWorkingDir().map(file -> { + String relativeWorkingDir = projectLayout.getProjectDirectory().getAsFile().toPath().relativize(file.toPath()).toString(); + return relativeWorkingDir; + }); + } + + @Internal abstract public Property getWorkingDir(); @Internal @@ -117,9 +127,10 @@ public LoggedExec( * can be reused across different build invocations. * */ private void setupDefaultEnvironment(ProviderFactory providerFactory) { - getEnvironment().putAll(providerFactory.environmentVariablesPrefixedBy("BUILDKITE")); getEnvironment().putAll(providerFactory.environmentVariablesPrefixedBy("GRADLE_BUILD_CACHE")); - getEnvironment().putAll(providerFactory.environmentVariablesPrefixedBy("VAULT")); + + getNonTrackedEnvironment().putAll(providerFactory.environmentVariablesPrefixedBy("BUILDKITE")); + getNonTrackedEnvironment().putAll(providerFactory.environmentVariablesPrefixedBy("VAULT")); Provider javaToolchainHome = providerFactory.environmentVariable("JAVA_TOOLCHAIN_HOME"); if (javaToolchainHome.isPresent()) { getEnvironment().put("JAVA_TOOLCHAIN_HOME", javaToolchainHome); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/BasePluginBuildPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/plugin/BasePluginBuildPlugin.java index e3adfe8d28148..42e576012c0c9 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/BasePluginBuildPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/plugin/BasePluginBuildPlugin.java @@ -24,6 +24,8 @@ import org.gradle.api.Task; import org.gradle.api.Transformer; import org.gradle.api.artifacts.type.ArtifactTypeDefinition; +import org.gradle.api.attributes.Attribute; +import org.gradle.api.attributes.LibraryElements; import org.gradle.api.file.CopySpec; import org.gradle.api.file.FileCollection; import org.gradle.api.file.RegularFile; @@ -126,9 +128,27 @@ private TaskProvider createBundleTasks(final Project project, PluginPropert // know about the plugin (used by test security code to statically initialize the plugin in unit tests) var testSourceSet = project.getExtensions().getByType(SourceSetContainer.class).getByName("test"); Map map = Map.of("builtBy", buildProperties); - testSourceSet.getOutput().dir(map, new File(project.getBuildDir(), "generated-resources")); + + File generatedResources = new File(project.getBuildDir(), "generated-resources"); + testSourceSet.getOutput().dir(map, generatedResources); testSourceSet.getResources().srcDir(pluginMetadata); + // expose the plugin properties and metadata 
for other plugins to use in their tests. + // See TestWithDependenciesPlugin for how this is used. + project.getConfigurations().create("pluginMetadata", conf -> { + conf.getAttributes().attribute(Attribute.of("pluginMetadata", Boolean.class), true); + conf.getAttributes() + .attribute( + LibraryElements.LIBRARY_ELEMENTS_ATTRIBUTE, + project.getObjects().named(LibraryElements.class, LibraryElements.RESOURCES) + ); + }); + + project.getArtifacts().add("pluginMetadata", new File(project.getBuildDir(), "generated-descriptor"), artifact -> { + artifact.builtBy(buildProperties); + }); + project.getArtifacts().add("pluginMetadata", pluginMetadata); + // getAttributes().attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, "plugin-metadata"); var bundleSpec = createBundleSpec(project, pluginMetadata, buildProperties); extension.setBundleSpec(bundleSpec); // create the actual bundle task, which zips up all the files for the plugin @@ -167,7 +187,7 @@ private static CopySpec createBundleSpec( copySpec.exclude("plugin-security.codebases"); }); bundleSpec.from( - (Callable>) () -> project.getPluginManager().hasPlugin("com.github.johnrengelman.shadow") + (Callable>) () -> project.getPluginManager().hasPlugin("com.gradleup.shadow") ? project.getTasks().named("shadowJar") : project.getTasks().named("jar") ); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/GeneratePluginPropertiesTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/plugin/GeneratePluginPropertiesTask.java index e144122f97770..6cf01814a45ef 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/GeneratePluginPropertiesTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/plugin/GeneratePluginPropertiesTask.java @@ -19,10 +19,13 @@ import org.gradle.api.file.RegularFileProperty; import org.gradle.api.provider.ListProperty; import org.gradle.api.provider.Property; +import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.InputFiles; import org.gradle.api.tasks.Optional; import org.gradle.api.tasks.OutputFile; +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.TaskAction; import org.objectweb.asm.ClassReader; import org.objectweb.asm.tree.ClassNode; @@ -39,6 +42,7 @@ import javax.inject.Inject; +@CacheableTask public abstract class GeneratePluginPropertiesTask extends DefaultTask { public static final String PROPERTIES_FILENAME = "plugin-descriptor.properties"; @@ -82,6 +86,7 @@ public GeneratePluginPropertiesTask(ProjectLayout projectLayout) { public abstract Property getIsLicensed(); @InputFiles + @PathSensitive(PathSensitivity.RELATIVE) public abstract ConfigurableFileCollection getModuleInfoFile(); @OutputFile diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java index ec341ecfd8b79..77393fe16b4c2 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java @@ -76,6 +76,7 @@ public class ElasticsearchCluster implements TestClusterConfiguration, Named { private final LinkedHashMap> waitConditions = new LinkedHashMap<>(); private final transient Project project; private final Provider reaper; + private final Provider testClustersRegistryProvider; private final FileSystemOperations 
fileSystemOperations; private final ArchiveOperations archiveOperations; private final ExecOperations execOperations; @@ -87,11 +88,14 @@ public class ElasticsearchCluster implements TestClusterConfiguration, Named { private boolean shared = false; + private int claims = 0; + public ElasticsearchCluster( String path, String clusterName, Project project, Provider<ReaperService> reaper, + Provider<TestClustersRegistry> testClustersRegistryProvider, FileSystemOperations fileSystemOperations, ArchiveOperations archiveOperations, ExecOperations execOperations, @@ -104,6 +108,7 @@ public ElasticsearchCluster( this.clusterName = clusterName; this.project = project; this.reaper = reaper; + this.testClustersRegistryProvider = testClustersRegistryProvider; this.fileSystemOperations = fileSystemOperations; this.archiveOperations = archiveOperations; this.execOperations = execOperations; @@ -120,6 +125,7 @@ public ElasticsearchCluster( clusterName + "-0", project, reaper, + testClustersRegistryProvider, fileSystemOperations, archiveOperations, execOperations, @@ -177,6 +183,7 @@ public void setNumberOfNodes(int numberOfNodes) { clusterName + "-" + i, project, reaper, + testClustersRegistryProvider, fileSystemOperations, archiveOperations, execOperations, @@ -408,6 +415,7 @@ public void setPreserveDataDir(boolean preserveDataDir) { public void freeze() { nodes.forEach(ElasticsearchNode::freeze); configurationFrozen.set(true); + nodes.whenObjectAdded(node -> { throw new IllegalStateException("Cannot add nodes to test cluster after it has been frozen"); }); } private void checkFrozen() { @@ -663,4 +671,11 @@ public String toString() { return "cluster{" + path + ":" + clusterName + "}"; } + int addClaim() { + return ++this.claims; + } + + int removeClaim() { + return --this.claims; + } }
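The addClaim()/removeClaim() pair above replaces the registry-side claims map that is removed later in this diff. A simplified sketch of the intended lifecycle, mirroring the TestClustersRegistry code further down (the real version also consults runningClusters and the keep-alive flag):

// Illustrative only: the per-cluster claim counter gates shutdown.
class ClaimLifecycleSketch {
    void claim(ElasticsearchCluster cluster) {
        if (cluster.addClaim() > 1) {
            cluster.setShared(true); // a second task claimed the same cluster
        }
    }

    void release(ElasticsearchCluster cluster) {
        if (cluster.removeClaim() <= 0) {
            cluster.stop(false); // last claim released: the cluster may be stopped
        }
    }
}

diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index df11733928f0f..4cb67e249b0b0 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -98,7 +98,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { private static final int ES_DESTROY_TIMEOUT = 20; private static final TimeUnit ES_DESTROY_TIMEOUT_UNIT = TimeUnit.SECONDS; - private static final int NODE_UP_TIMEOUT = 2; + private static final int NODE_UP_TIMEOUT = 3; private static final TimeUnit NODE_UP_TIMEOUT_UNIT = TimeUnit.MINUTES; private static final int ADDITIONAL_CONFIG_TIMEOUT = 15; private static final TimeUnit ADDITIONAL_CONFIG_TIMEOUT_UNIT = TimeUnit.SECONDS; @@ -124,6 +124,8 @@ public class ElasticsearchNode implements TestClusterConfiguration { private final String name; transient private final Project project; private final Provider<ReaperService> reaperServiceProvider; + private final Provider<TestClustersRegistry> testClustersRegistryProvider; + private final FileSystemOperations fileSystemOperations; private final ArchiveOperations archiveOperations; private final ExecOperations execOperations; @@ -164,7 +166,6 @@ public class ElasticsearchNode implements TestClusterConfiguration { private final List<ElasticsearchDistribution> distributions = new ArrayList<>(); private int currentDistro = 0; private TestDistribution testDistribution; - private volatile Process esProcess; private Function<String, String> nameCustomization = s -> s; private boolean isWorkingDirConfigured = false; private String httpPort = "0"; @@ -179,6 +180,7 @@ public class ElasticsearchNode implements 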
TestClusterConfiguration { String name, Project project, Provider<ReaperService> reaperServiceProvider, + Provider<TestClustersRegistry> testClustersRegistryProvider, FileSystemOperations fileSystemOperations, ArchiveOperations archiveOperations, ExecOperations execOperations, @@ -191,6 +193,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { this.name = name; this.project = project; this.reaperServiceProvider = reaperServiceProvider; + this.testClustersRegistryProvider = testClustersRegistryProvider; this.fileSystemOperations = fileSystemOperations; this.archiveOperations = archiveOperations; this.execOperations = execOperations; @@ -892,11 +895,13 @@ private void startElasticsearchProcess() { } } LOGGER.info("Running `{}` in `{}` for {} env: {}", command, workingDir, this, environment); + Process esProcess; try { esProcess = processBuilder.start(); } catch (IOException e) { throw new TestClustersException("Failed to start ES process for " + this, e); } + testClustersRegistryProvider.get().storeProcess(id(), esProcess); reaperServiceProvider.get().registerPid(toString(), esProcess.pid()); } @@ -982,6 +987,7 @@ public synchronized void stop(boolean tailLogs) { } catch (IOException e) { throw new UncheckedIOException(e); } + Process esProcess = testClustersRegistryProvider.get().getProcess(id()); if (esProcess == null && tailLogs) { // This is a special case. If start() throws an exception the plugin will still call stop // Another exception here would eat the original. @@ -1574,6 +1580,7 @@ public List<FeatureFlag> getFeatureFlags() { @Override @Internal public boolean isProcessAlive() { + Process esProcess = testClustersRegistryProvider.get().getProcess(id()); requireNonNull(esProcess, "Can't wait for `" + this + "` as it's not started. Does the task have `useCluster` ?"); return esProcess.isAlive(); } @@ -1602,6 +1609,10 @@ public int hashCode() { @Override public String toString() { + return id() + " (" + System.identityHashCode(this) + ")"; + } + + private String id() { return "node{" + path + ":" + name + "}"; } @@ -1702,7 +1713,7 @@ public CharSequence[] getArgs() { } } - private record FeatureFlag(String feature, Version from, Version until) { + public record FeatureFlag(String feature, Version from, Version until) { @Input public String getFeature() {
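Note the pattern in the hunks above: the node no longer caches the Process in a field. Every access resolves it from the shared registry build service using the stable id() key, so task state that Gradle serializes and restores still finds the one live process. A hedged sketch of the lookup side, using only names visible in this diff:

// Illustrative only: resolving a node's process through the shared registry.
// The id follows ElasticsearchNode#id(), i.e. "node{<path>:<name>}".
class NodeProcessLookupSketch {
    static boolean isAlive(TestClustersRegistry registry, String nodeId) {
        Process process = registry.getProcess(nodeId);
        return process != null && process.isAlive();
    }
}

diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterInfo.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterInfo.java new file mode 100644 index 0000000000000..07663de7a9df9 --- /dev/null +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterInfo.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 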
+ */ +package org.elasticsearch.gradle.testclusters; + +import java.io.File; +import java.util.List; + +public class TestClusterInfo { + private final List<String> allHttpSocketURI; + private final List<String> allTransportPortURI; + private final List<File> auditLogs; + + public TestClusterInfo(List<String> allHttpSocketURI, List<String> allTransportPortURI, List<File> auditLogs) { + this.allHttpSocketURI = allHttpSocketURI; + this.allTransportPortURI = allTransportPortURI; + this.auditLogs = auditLogs; + } + + public List<String> getAllHttpSocketURI() { + return allHttpSocketURI; + } + + public List<String> getAllTransportPortURI() { + return allTransportPortURI; + } + + public List<File> getAuditLogs() { + return auditLogs; + } +} diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterValueSource.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterValueSource.java new file mode 100644 index 0000000000000..8ecadcdc6d2b1 --- /dev/null +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterValueSource.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.gradle.testclusters; + +import org.gradle.api.provider.Property; +import org.gradle.api.provider.ValueSource; +import org.gradle.api.provider.ValueSourceParameters; +import org.jetbrains.annotations.Nullable; + +public abstract class TestClusterValueSource implements ValueSource<TestClusterInfo, TestClusterValueSource.Parameters> { + + @Nullable + @Override + public TestClusterInfo obtain() { + String clusterName = getParameters().getClusterName().get(); + String path = getParameters().getPath().get(); + return getParameters().getService().get().getClusterDetails(path, clusterName); + } + + interface Parameters extends ValueSourceParameters { + Property<String> getClusterName(); + + Property<String> getPath(); + + Property<TestClustersRegistry> getService(); + } +} diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java index f84aa2a0389c2..9e5fc1f09ac9e 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java @@ -8,6 +8,7 @@ */ package org.elasticsearch.gradle.testclusters; +import org.elasticsearch.gradle.ElasticsearchDistribution; import org.gradle.api.Task; import org.gradle.api.provider.Property; import org.gradle.api.provider.Provider; @@ -34,10 +35,15 @@ default void useCluster(ElasticsearchCluster cluster) { if (cluster.getPath().equals(getProject().getPath()) == false) { throw new TestClustersException("Task " + getPath() + " can't use test cluster from" + " another project " + cluster); } - - cluster.getNodes() - .all(node -> node.getDistributions().forEach(distro -> dependsOn(getProject().provider(() -> distro.maybeFreeze())))); - dependsOn(cluster.getPluginAndModuleConfigurations()); + if (cluster.getName().equals(getName())) { + for (ElasticsearchNode node : cluster.getNodes()) { + for (ElasticsearchDistribution distro : node.getDistributions()) { 
ElasticsearchDistribution frozenDistro = distro.maybeFreeze(); + dependsOn(frozenDistro); + } + } + dependsOn(cluster.getPluginAndModuleConfigurations()); + } getClusters().add(cluster); } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index 301782d52d1a3..ada31bc11a653 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -26,6 +26,7 @@ import org.gradle.api.invocation.Gradle; import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; +import org.gradle.api.provider.Property; import org.gradle.api.provider.Provider; import org.gradle.api.provider.ProviderFactory; import org.gradle.api.services.BuildService; @@ -106,15 +107,22 @@ public void apply(Project project) { runtimeJavaProvider = providerFactory.provider( () -> System.getenv("RUNTIME_JAVA_HOME") == null ? Jvm.current().getJavaHome() : new File(System.getenv("RUNTIME_JAVA_HOME")) ); + + // register cluster registry as a global build service + Provider<TestClustersRegistry> testClustersRegistryProvider = project.getGradle() + .getSharedServices() + .registerIfAbsent(REGISTRY_SERVICE_NAME, TestClustersRegistry.class, noop()); + // enable the DSL to describe clusters - NamedDomainObjectContainer<ElasticsearchCluster> container = createTestClustersContainerExtension(project, reaperServiceProvider); + NamedDomainObjectContainer<ElasticsearchCluster> container = createTestClustersContainerExtension( + project, + testClustersRegistryProvider, + reaperServiceProvider + ); // provide a task to be able to list defined clusters. createListClustersTask(project, container); - // register cluster registry as a global build service - project.getGradle().getSharedServices().registerIfAbsent(REGISTRY_SERVICE_NAME, TestClustersRegistry.class, noop()); - // register throttle so we only run at most max-workers/2 nodes concurrently Provider<TestClustersThrottle> testClustersThrottleProvider = project.getGradle() .getSharedServices() @@ -145,6 +153,7 @@ private void configureArtifactTransforms(Project project) { private NamedDomainObjectContainer<ElasticsearchCluster> createTestClustersContainerExtension( Project project, + Provider<TestClustersRegistry> testClustersRegistryProvider, Provider<ReaperService> reaper ) { // Create an extension that allows describing clusters @@ -155,6 +164,7 @@ private NamedDomainObjectContainer<ElasticsearchCluster> createTestClustersConta name, project, reaper, + testClustersRegistryProvider, getFileSystemOperations(), getArchiveOperations(), getExecOperations(), @@ -199,7 +209,9 @@ public void apply(Project project) { Provider<TaskEventsService> testClusterTasksService = project.getGradle() .getSharedServices() - .registerIfAbsent(TEST_CLUSTER_TASKS_SERVICE, TaskEventsService.class, spec -> {}); + .registerIfAbsent(TEST_CLUSTER_TASKS_SERVICE, TaskEventsService.class, spec -> { + spec.getParameters().getRegistry().set(registryProvider); + }); TestClustersRegistry registry = registryProvider.get(); // When we know what tasks will run, we claim the clusters of those tasks to differentiate between clusters @@ -209,7 +221,7 @@ public void apply(Project project) { configureClaimClustersHook(project.getGradle(), registry); // Before each task, we determine if a cluster needs to be started for that task. 
- configureStartClustersHook(project.getGradle(), registry, testClusterTasksService); + configureStartClustersHook(project.getGradle()); // After each task we determine if there are clusters that are no longer needed. getEventsListenerRegistry().onTaskCompletion(testClusterTasksService); @@ -228,12 +240,7 @@ private static void configureClaimClustersHook(Gradle gradle, TestClustersRegist }); } - private void configureStartClustersHook( - Gradle gradle, - TestClustersRegistry registry, - Provider<TaskEventsService> testClusterTasksService - ) { - testClusterTasksService.get().registry(registry); + private void configureStartClustersHook(Gradle gradle) { gradle.getTaskGraph().whenReady(taskExecutionGraph -> { taskExecutionGraph.getAllTasks() .stream() @@ -249,19 +256,14 @@ private void configureStartClustersHook( } } - static public abstract class TaskEventsService implements BuildService<BuildServiceParameters.None>, OperationCompletionListener { + static public abstract class TaskEventsService implements BuildService<Params>, OperationCompletionListener { Map<String, TestClustersAware> tasksMap = new HashMap<>(); - private TestClustersRegistry registryProvider; public void register(TestClustersAware task) { tasksMap.put(task.getPath(), task); } - public void registry(TestClustersRegistry registry) { - this.registryProvider = registry; - } - @Override public void onFinish(FinishEvent finishEvent) { if (finishEvent instanceof TaskFinishEvent taskFinishEvent) { @@ -273,11 +275,18 @@ public void onFinish(FinishEvent finishEvent) { if (task.getDidWork()) { task.getClusters() .forEach( - cluster -> registryProvider.stopCluster(cluster, taskFinishEvent.getResult() instanceof TaskFailureResult) + cluster -> getParameters().getRegistry() + .get() + .stopCluster(cluster, taskFinishEvent.getResult() instanceof TaskFailureResult) ); } } } } + + // Parameters for this build service: the shared test clusters registry + interface Params extends BuildServiceParameters { + Property<TestClustersRegistry> getRegistry(); + } } }
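With the Params interface above, the TaskEventsService reaches the registry through typed build-service parameters instead of the removed registry(...) setter, so Gradle can serialize the wiring with the service. A hedged sketch of the registration call, assuming `gradle` and `registryProvider` are in scope; the service name string is illustrative (the real code uses the TEST_CLUSTER_TASKS_SERVICE constant):

// Sketch: parameters are fixed at registration time and travel with the service.
Provider<TaskEventsService> events = gradle.getSharedServices()
    .registerIfAbsent("testClustersTaskEvents", TaskEventsService.class, spec -> {
        spec.getParameters().getRegistry().set(registryProvider); // no mutable setter needed
    });

diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java index 8de0dd67b654c..8d2a9217e7d0c 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java @@ -10,6 +10,8 @@ import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; +import org.gradle.api.provider.Provider; +import org.gradle.api.provider.ProviderFactory; import org.gradle.api.services.BuildService; import org.gradle.api.services.BuildServiceParameters; @@ -17,20 +19,23 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; + +import javax.inject.Inject; public abstract class TestClustersRegistry implements BuildService<BuildServiceParameters.None> { private static final Logger logger = Logging.getLogger(TestClustersRegistry.class); private static final String TESTCLUSTERS_INSPECT_FAILURE = "testclusters.inspect.failure"; private final Boolean allowClusterToSurvive = Boolean.valueOf(System.getProperty(TESTCLUSTERS_INSPECT_FAILURE, "false")); - private final Map<ElasticsearchCluster, Integer> claimsInventory = new HashMap<>(); private final Set<ElasticsearchCluster> runningClusters = new HashSet<>(); + private final Map<String, Process> nodeProcesses = new HashMap<>(); + + @Inject + public abstract ProviderFactory getProviderFactory(); public void claimCluster(ElasticsearchCluster cluster) { - cluster.freeze(); - int claim = claimsInventory.getOrDefault(cluster, 0) + 1; - claimsInventory.put(cluster, claim); - 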
if (claim > 1) { + int claims = cluster.addClaim(); + if (claims > 1) { cluster.setShared(true); } } @@ -43,6 +48,13 @@ public void maybeStartCluster(ElasticsearchCluster cluster) { cluster.start(); } + public Provider<TestClusterInfo> getClusterInfo(String clusterName) { + return getProviderFactory().of(TestClusterValueSource.class, spec -> { + spec.getParameters().getService().set(TestClustersRegistry.this); + spec.getParameters().getClusterName().set(clusterName); + }); + } + public void stopCluster(ElasticsearchCluster cluster, boolean taskFailed) { if (taskFailed) { // If the task fails, and other tasks use this cluster, the other task will likely never be @@ -67,8 +79,7 @@ public void stopCluster(ElasticsearchCluster cluster, boolean taskFailed) { runningClusters.remove(cluster); } } else { - int currentClaims = claimsInventory.getOrDefault(cluster, 0) - 1; - claimsInventory.put(cluster, currentClaims); + int currentClaims = cluster.removeClaim(); if (currentClaims <= 0 && runningClusters.contains(cluster)) { cluster.stop(false); runningClusters.remove(cluster); @@ -76,4 +87,33 @@ public void stopCluster(ElasticsearchCluster cluster, boolean taskFailed) { } } + public TestClusterInfo getClusterDetails(String path, String clusterName) { + ElasticsearchCluster cluster = runningClusters.stream() + .filter(c -> c.getPath().equals(path)) + .filter(c -> c.getName().equals(clusterName)) + .findFirst() + .orElseThrow(); + return new TestClusterInfo( + cluster.getAllHttpSocketURI(), + cluster.getAllTransportPortURI(), + cluster.getNodes().stream().map(n -> n.getAuditLog()).collect(Collectors.toList()) + ); + } + + public void restart(String path, String clusterName) { + ElasticsearchCluster cluster = runningClusters.stream() + .filter(c -> c.getPath().equals(path)) + .filter(c -> c.getName().equals(clusterName)) + .findFirst() + .orElseThrow(); + cluster.restart(); + } + + public void storeProcess(String id, Process esProcess) { + nodeProcesses.put(id, esProcess); + } + + public Process getProcess(String id) { + return nodeProcesses.get(id); + } }
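getClusterInfo() above hands tasks a Provider backed by the TestClusterValueSource introduced earlier, so cluster endpoints can be queried lazily without holding the live cluster object. A hedged usage sketch, assuming `registry` is a Provider<TestClustersRegistry> in scope and the cluster name is illustrative:

// Illustrative only: derive endpoint data lazily from the value-source-backed provider.
Provider<TestClusterInfo> info = registry.get().getClusterInfo("integTest");
Provider<List<String>> httpUris = info.map(TestClusterInfo::getAllHttpSocketURI);

diff --git a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy index d3d06b2de3575..f3f8e4703eba2 100644 --- a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy +++ b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy @@ -168,7 +168,6 @@ abstract class AbstractGradleFuncTest extends Specification { ${extraPlugins.collect { p -> "id '$p'" }.join('\n')} } import org.elasticsearch.gradle.Architecture - import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.BwcVersions import org.elasticsearch.gradle.Version @@ -182,7 +181,7 @@ abstract class AbstractGradleFuncTest extends Specification { ] BwcVersions versions = new BwcVersions(currentVersion, versionList) - BuildParams.init { it.setBwcVersions(provider(() -> versions)) } + buildParams.getBwcVersionsProperty().set(versions) """ } diff --git a/build.gradle b/build.gradle index 71386a37cbb0d..715614c1beea4 100644 --- a/build.gradle +++ b/build.gradle @@ -17,7 +17,6 @@ import org.elasticsearch.gradle.DistributionDownloadPlugin import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.BaseInternalPluginBuildPlugin import org.elasticsearch.gradle.internal.ResolveAllDependencies -import 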
org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.util.GradleUtils import org.gradle.plugins.ide.eclipse.model.AccessRule @@ -28,10 +27,6 @@ import static org.elasticsearch.gradle.util.GradleUtils.maybeConfigure buildscript { repositories { - maven { - url 'https://jitpack.io' - } - mavenCentral() } } @@ -143,23 +138,23 @@ tasks.register("updateCIBwcVersions") { } doLast { - writeVersions(file(".ci/bwcVersions"), filterIntermediatePatches(BuildParams.bwcVersions.indexCompatible)) - writeVersions(file(".ci/snapshotBwcVersions"), filterIntermediatePatches(BuildParams.bwcVersions.unreleasedIndexCompatible)) + writeVersions(file(".ci/bwcVersions"), filterIntermediatePatches(buildParams.bwcVersions.indexCompatible)) + writeVersions(file(".ci/snapshotBwcVersions"), filterIntermediatePatches(buildParams.bwcVersions.unreleasedIndexCompatible)) expandBwcList( ".buildkite/pipelines/intake.yml", ".buildkite/pipelines/intake.template.yml", - filterIntermediatePatches(BuildParams.bwcVersions.unreleasedIndexCompatible) + filterIntermediatePatches(buildParams.bwcVersions.unreleasedIndexCompatible) ) writeBuildkitePipeline( ".buildkite/pipelines/periodic.yml", ".buildkite/pipelines/periodic.template.yml", [ - new ListExpansion(versions: filterIntermediatePatches(BuildParams.bwcVersions.unreleasedIndexCompatible), variable: "BWC_LIST"), + new ListExpansion(versions: filterIntermediatePatches(buildParams.bwcVersions.unreleasedIndexCompatible), variable: "BWC_LIST"), ], [ new StepExpansion( templatePath: ".buildkite/pipelines/periodic.bwc.template.yml", - versions: filterIntermediatePatches(BuildParams.bwcVersions.indexCompatible), + versions: filterIntermediatePatches(buildParams.bwcVersions.indexCompatible), variable: "BWC_STEPS" ), ] @@ -169,7 +164,7 @@ tasks.register("updateCIBwcVersions") { ".buildkite/pipelines/periodic-packaging.yml", ".buildkite/pipelines/periodic-packaging.template.yml", ".buildkite/pipelines/periodic-packaging.bwc.template.yml", - filterIntermediatePatches(BuildParams.bwcVersions.indexCompatible) + filterIntermediatePatches(buildParams.bwcVersions.indexCompatible) ) } } @@ -191,19 +186,19 @@ tasks.register("verifyVersions") { // Fetch the metadata and parse the xml into Version instances because it's more straight forward here // rather than bwcVersion ( VersionCollection ). 
new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s -> - BuildParams.bwcVersions.compareToAuthoritative( + buildParams.bwcVersions.compareToAuthoritative( new XmlParser().parse(s) .versioning.versions.version .collect { it.text() }.findAll { it ==~ /\d+\.\d+\.\d+/ } .collect { Version.fromString(it) } ) } - verifyCiYaml(file(".ci/bwcVersions"), filterIntermediatePatches(BuildParams.bwcVersions.indexCompatible)) - verifyCiYaml(file(".ci/snapshotBwcVersions"), BuildParams.bwcVersions.unreleasedIndexCompatible) + verifyCiYaml(file(".ci/bwcVersions"), filterIntermediatePatches(buildParams.bwcVersions.indexCompatible)) + verifyCiYaml(file(".ci/snapshotBwcVersions"), buildParams.bwcVersions.unreleasedIndexCompatible) // Make sure backport bot config file is up to date JsonNode backportConfig = new ObjectMapper().readTree(file(".backportrc.json")) - BuildParams.bwcVersions.forPreviousUnreleased { unreleasedVersion -> + buildParams.bwcVersions.forPreviousUnreleased { unreleasedVersion -> boolean valid = backportConfig.get("targetBranchChoices").elements().any { branchChoice -> if (branchChoice.isObject()) { return branchChoice.get("name").textValue() == unreleasedVersion.branch diff --git a/distribution/build.gradle b/distribution/build.gradle index 5b865b36f9e4d..bfbf10ac85e2f 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -14,8 +14,7 @@ import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.internal.ConcatFilesTask import org.elasticsearch.gradle.internal.DependenciesInfoPlugin import org.elasticsearch.gradle.internal.NoticeTask -import org.elasticsearch.gradle.internal.info.BuildParams -import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin +import org.elasticsearch.gradle.internal.test.ClusterFeaturesMetadataPlugin import java.nio.file.Files import java.nio.file.Path @@ -34,7 +33,7 @@ configurations { } featuresMetadata { attributes { - attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, HistoricalFeaturesMetadataPlugin.FEATURES_METADATA_TYPE) + attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ClusterFeaturesMetadataPlugin.FEATURES_METADATA_TYPE) } } } @@ -208,7 +207,7 @@ project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each { distro.copyModule(processDefaultOutputsTaskProvider, module) dependencies.add('featuresMetadata', module) - if (module.name.startsWith('transport-') || (BuildParams.snapshotBuild == false && module.name == 'apm')) { + if (module.name.startsWith('transport-') || (buildParams.snapshotBuild == false && module.name == 'apm')) { distro.copyModule(processIntegTestOutputsTaskProvider, module) } @@ -378,7 +377,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { exclude "**/platform/${excludePlatform}/**" } } - if (BuildParams.isSnapshotBuild()) { + if (buildParams.isSnapshotBuild()) { from(buildExternalTestModulesTaskProvider) } if (project.path.startsWith(':distribution:packages')) { diff --git a/distribution/docker/README.md b/distribution/docker/README.md index 49facab461edc..9438b4f1e82c3 100644 --- a/distribution/docker/README.md +++ b/distribution/docker/README.md @@ -3,8 +3,7 @@ The ES build can generate several types of Docker image. These are enumerated in the [DockerBase] enum. 
- * Default - this is what most people use, and is based on Ubuntu + * Default - this is what most people use, and is based on [RedHat's UBI images][ubi], specifically their minimal flavour. * Wolfi - the same as the default image, but based upon [Wolfi](https://github.com/wolfi-dev) * Cloud ESS - this directly extends the Wolfi image, and adds all ES plugins @@ -23,14 +22,7 @@ the [DockerBase] enum. software (FOSS) and Commercial off-the-shelf (COTS). In practice, this is another UBI build, this time on the regular UBI image, with extra hardening. See below for more details. - * Cloud - this is mostly the same as the default image, with some notable differences: - * `filebeat` and `metricbeat` are included - * `wget` is included - * The `ENTRYPOINT` is just `/bin/tini`, and the `CMD` is - `/app/elasticsearch.sh`. In normal use this file would be bind-mounted - in, but the image ships a stub version of this file so that the image - can still be tested. -The long-term goal is for both Cloud images to be retired in favour of the +The long-term goal is for the Cloud ESS image to be retired in favour of the default image. diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index 788e836f8f045..f5b94fb9dfd94 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -8,7 +8,6 @@ import org.elasticsearch.gradle.internal.docker.DockerSupportService import org.elasticsearch.gradle.internal.docker.ShellRetry import org.elasticsearch.gradle.internal.docker.TransformLog4jConfigFilter import org.elasticsearch.gradle.internal.docker.* -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.util.GradleUtils import org.elasticsearch.gradle.Architecture import java.nio.file.Path @@ -120,7 +119,7 @@ ext.expansions = { Architecture architecture, DockerBase base -> // the image. When developing the Docker images, it's very tedious to completely rebuild // an image for every single change. Therefore, outside of CI, we fix the // build time to midnight so that the Docker build cache is usable. - def buildDate = BuildParams.isCi() ? BuildParams.buildDate : BuildParams.buildDate.truncatedTo(ChronoUnit.DAYS).toString() + def buildDate = buildParams.isCi() ? buildParams.buildDate : buildParams.buildDate.truncatedTo(ChronoUnit.DAYS).toString() return [ 'arch' : architecture.classifier, @@ -128,7 +127,7 @@ ext.expansions = { Architecture architecture, DockerBase base -> 'bin_dir' : base == DockerBase.IRON_BANK ? 'scripts' : 'bin', 'build_date' : buildDate, 'config_dir' : base == DockerBase.IRON_BANK ? 'scripts' : 'config', - 'git_revision' : BuildParams.gitRevision, + 'git_revision' : buildParams.gitRevision, 'license' : base == DockerBase.IRON_BANK ? 
'Elastic License 2.0' : 'Elastic-License-2.0', 'package_manager' : base.packageManager, 'docker_base' : base.name().toLowerCase(), @@ -390,7 +389,7 @@ void addBuildDockerImageTask(Architecture architecture, DockerBase base) { dockerContext.fileProvider(transformTask.map { Sync task -> task.getDestinationDir() }) - noCache = BuildParams.isCi() + noCache = buildParams.isCi() tags = generateTags(base, architecture) platforms.add(architecture.dockerPlatform) @@ -485,7 +484,7 @@ void addBuildEssDockerImageTask(Architecture architecture) { dockerContext.fileProvider(buildContextTask.map { it.getDestinationDir() }) - noCache = BuildParams.isCi() + noCache = buildParams.isCi() baseImages = [] tags = generateTags(dockerBase, architecture) platforms.add(architecture.dockerPlatform) @@ -528,9 +527,7 @@ subprojects { Project subProject -> final Architecture architecture = subProject.name.contains('aarch64-') ? Architecture.AARCH64 : Architecture.X64 DockerBase base = DockerBase.DEFAULT - if (subProject.name.contains('ubi-')) { - base = DockerBase.UBI - } else if (subProject.name.contains('ironbank-')) { + if (subProject.name.contains('ironbank-')) { base = DockerBase.IRON_BANK } else if (subProject.name.contains('cloud-ess-')) { base = DockerBase.CLOUD_ESS @@ -539,11 +536,11 @@ subprojects { Project subProject -> } final String arch = architecture == Architecture.AARCH64 ? '-aarch64' : '' - final String extension = base == DockerBase.UBI ? 'ubi.tar' : + final String extension = (base == DockerBase.IRON_BANK ? 'ironbank.tar' : - (base == DockerBase.CLOUD_ESS ? 'cloud-ess.tar' : - (base == DockerBase.WOLFI ? 'wolfi.tar' : - 'docker.tar'))) + (base == DockerBase.CLOUD_ESS ? 'cloud-ess.tar' : + (base == DockerBase.WOLFI ? 'wolfi.tar' : + 'docker.tar'))) final String artifactName = "elasticsearch${arch}${base.suffix}_test" final String exportTaskName = taskName("export", architecture, base, 'DockerImage') diff --git a/distribution/docker/src/docker/Dockerfile b/distribution/docker/src/docker/Dockerfile index fd2516f2fdc9a..6cb030565d9d2 100644 --- a/distribution/docker/src/docker/Dockerfile +++ b/distribution/docker/src/docker/Dockerfile @@ -41,9 +41,7 @@ RUN chmod 0555 /bin/tini <% } else { %> # Install required packages to extract the Elasticsearch distribution -<% if (docker_base == 'default' || docker_base == 'cloud') { %> -RUN <%= retry.loop(package_manager, "${package_manager} update && DEBIAN_FRONTEND=noninteractive ${package_manager} install -y curl ") %> -<% } else if (docker_base == "wolfi") { %> +<% if (docker_base == "wolfi") { %> RUN <%= retry.loop(package_manager, "export DEBIAN_FRONTEND=noninteractive && ${package_manager} update && ${package_manager} update && ${package_manager} add --no-cache curl") %> <% } else { %> RUN <%= retry.loop(package_manager, "${package_manager} install -y findutils tar gzip") %> @@ -117,27 +115,6 @@ RUN sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' bin/elas chmod 0775 bin config config/jvm.options.d data logs plugins && \\ find config -type f -exec chmod 0664 {} + -<% if (docker_base == "cloud") { %> -COPY filebeat-${version}.tar.gz metricbeat-${version}.tar.gz /tmp/ -RUN set -eux ; \\ - for beat in filebeat metricbeat ; do \\ - if [ ! -s /tmp/\$beat-${version}.tar.gz ]; then \\ - echo "/tmp/\$beat-${version}.tar.gz is empty - cannot uncompress" 2>&1 ; \\ - exit 1 ; \\ - fi ; \\ - if ! 
tar tf /tmp/\$beat-${version}.tar.gz >/dev/null; then \\ - echo "/tmp/\$beat-${version}.tar.gz is corrupt - cannot uncompress" 2>&1 ; \\ - exit 1 ; \\ - fi ; \\ - mkdir -p /opt/\$beat ; \\ - tar xf /tmp/\$beat-${version}.tar.gz -C /opt/\$beat --strip-components=1 ; \\ - done - -# Add plugins infrastructure -RUN mkdir -p /opt/plugins/archive -RUN chmod -R 0555 /opt/plugins -<% } %> - ################################################################################ # Build stage 2 (the actual Elasticsearch image): # @@ -173,21 +150,6 @@ SHELL ["/bin/bash", "-c"] # Optionally set Bash as the default shell in the container at runtime CMD ["/bin/bash"] -<% } else if (docker_base == "default" || docker_base == "cloud") { %> - -# Change default shell to bash, then install required packages with retries. -RUN yes no | dpkg-reconfigure dash && \\ - <%= retry.loop( - package_manager, - "export DEBIAN_FRONTEND=noninteractive && \n" + - " ${package_manager} update && \n" + - " ${package_manager} upgrade -y && \n" + - " ${package_manager} install -y --no-install-recommends \n" + - " ca-certificates curl netcat p11-kit unzip zip ${docker_base == 'cloud' ? 'wget' : '' } && \n" + - " ${package_manager} clean && \n" + - " rm -rf /var/lib/apt/lists/*" - ) %> - <% } else { %> RUN <%= retry.loop( @@ -201,12 +163,7 @@ RUN <%= retry.loop( <% } %> -<% if (docker_base == "default" || docker_base == "cloud") { %> -RUN groupadd -g 1000 elasticsearch && \\ - adduser --uid 1000 --gid 1000 --home /usr/share/elasticsearch elasticsearch && \\ - adduser elasticsearch root && \\ - chown -R 0:0 /usr/share/elasticsearch -<% } else if (docker_base == "wolfi") { %> +<% if (docker_base == "wolfi") { %> RUN groupadd -g 1000 elasticsearch && \ adduser -G elasticsearch -u 1000 elasticsearch -D --home /usr/share/elasticsearch elasticsearch && \ adduser elasticsearch root && \ @@ -226,10 +183,6 @@ COPY --from=builder --chown=0:0 /usr/share/elasticsearch /usr/share/elasticsearc COPY --from=builder --chown=0:0 /bin/tini /bin/tini <% } %> -<% if (docker_base == 'cloud') { %> -COPY --from=builder --chown=0:0 /opt /opt -<% } %> - ENV PATH /usr/share/elasticsearch/bin:\$PATH ENV SHELL /bin/bash COPY ${bin_dir}/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh @@ -251,12 +204,7 @@ RUN chmod g=u /etc/passwd && \\ chmod 0775 /usr/share/elasticsearch && \\ chown elasticsearch bin config config/jvm.options.d data logs plugins -<% if (docker_base == 'default' || docker_base == 'cloud') { %> -# Update "cacerts" bundle to use Ubuntu's CA certificates (and make sure it -# stays up-to-date with changes to Ubuntu's store) -COPY bin/docker-openjdk /etc/ca-certificates/update.d/docker-openjdk -RUN /etc/ca-certificates/update.d/docker-openjdk -<% } else if (docker_base == 'wolfi') { %> +<% if (docker_base == 'wolfi') { %> RUN ln -sf /etc/ssl/certs/java/cacerts /usr/share/elasticsearch/jdk/lib/security/cacerts <% } else { %> RUN ln -sf /etc/pki/ca-trust/extracted/java/cacerts /usr/share/elasticsearch/jdk/lib/security/cacerts @@ -284,9 +232,7 @@ LABEL org.label-schema.build-date="${build_date}" \\ org.opencontainers.image.url="https://www.elastic.co/products/elasticsearch" \\ org.opencontainers.image.vendor="Elastic" \\ org.opencontainers.image.version="${version}" -<% } %> -<% if (docker_base == 'ubi') { %> LABEL name="Elasticsearch" \\ maintainer="infra@elastic.co" \\ vendor="Elastic" \\ @@ -296,21 +242,12 @@ LABEL name="Elasticsearch" \\ description="You know, for search." 
<% } %> -<% if (docker_base == 'ubi') { %> -RUN mkdir /licenses && cp LICENSE.txt /licenses/LICENSE -<% } else if (docker_base == 'iron_bank') { %> RUN mkdir /licenses && cp LICENSE.txt /licenses/LICENSE +<% if (docker_base == 'iron_bank') { %> COPY LICENSE /licenses/LICENSE.addendum <% } %> -<% if (docker_base == "cloud") { %> -ENTRYPOINT ["/bin/tini", "--"] -CMD ["/app/elasticsearch.sh"] -# Generate a stub command that will be overwritten at runtime -RUN mkdir /app && \\ - echo -e '#!/bin/bash\\nexec /usr/local/bin/docker-entrypoint.sh eswrapper' > /app/elasticsearch.sh && \\ - chmod 0555 /app/elasticsearch.sh -<% } else if (docker_base == "wolfi") { %> +<% if (docker_base == "wolfi") { %> # Our actual entrypoint is `tini`, a minimal but functional init program. It # calls the entrypoint we provide, while correctly forwarding signals. ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/docker-entrypoint.sh"] diff --git a/distribution/docker/ubi-docker-aarch64-export/build.gradle b/distribution/docker/ubi-docker-aarch64-export/build.gradle deleted file mode 100644 index 537b5a093683e..0000000000000 --- a/distribution/docker/ubi-docker-aarch64-export/build.gradle +++ /dev/null @@ -1,2 +0,0 @@ -// This file is intentionally blank. All configuration of the -// export is done in the parent project. diff --git a/distribution/docker/ubi-docker-export/build.gradle b/distribution/docker/ubi-docker-export/build.gradle deleted file mode 100644 index 537b5a093683e..0000000000000 --- a/distribution/docker/ubi-docker-export/build.gradle +++ /dev/null @@ -1,2 +0,0 @@ -// This file is intentionally blank. All configuration of the -// export is done in the parent project. diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index e08f16c14ab88..5f45b4b72974f 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -10,7 +10,6 @@ import org.apache.tools.ant.filters.ReplaceTokens import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.OS -import org.elasticsearch.gradle.internal.info.BuildParams import org.redline_rpm.header.Flags import java.nio.file.Files @@ -44,7 +43,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.9.1" + id "com.netflix.nebula.ospackage-base" version "11.10.0" } ['deb', 'rpm'].each { type -> @@ -196,7 +195,7 @@ def commonPackageConfig(String type, String architecture) { configurationFile '/etc/elasticsearch/users_roles' from("${packagingFiles}") { dirPermissions { - unix(02750) + unix(0750) } into('/etc') permissionGroup 'elasticsearch' @@ -209,7 +208,7 @@ def commonPackageConfig(String type, String architecture) { from("${packagingFiles}/etc/elasticsearch") { into('/etc/elasticsearch') dirPermissions { - unix(02750) + unix(0750) } setgid = true filePermissions { @@ -261,7 +260,7 @@ def commonPackageConfig(String type, String architecture) { // ========= empty dirs ========= // NOTE: these are created under packagingFiles as empty, but the permissions are set here - Closure copyEmptyDir = { path, u, g, mode -> + Closure copyEmptyDir = { path, u, g, gid, mode -> File file = new File(path) into(file.parent) { from "${packagingFiles}/${file.parent}" @@ -273,12 +272,12 @@ def commonPackageConfig(String type, String architecture) { dirPermissions { unix(mode) } - setgid (mode == 02750) + setgid(gid) } } - copyEmptyDir('/var/log/elasticsearch', 'elasticsearch', 'elasticsearch', 02750) - copyEmptyDir('/var/lib/elasticsearch', 'elasticsearch', 'elasticsearch', 
02750) - copyEmptyDir('/usr/share/elasticsearch/plugins', 'root', 'root', 0755) + copyEmptyDir('/var/log/elasticsearch', 'elasticsearch', 'elasticsearch', true, 0750) + copyEmptyDir('/var/lib/elasticsearch', 'elasticsearch', 'elasticsearch', true, 0750) + copyEmptyDir('/usr/share/elasticsearch/plugins', 'root', 'root', false, 0755) // the oss package conflicts with the default distribution and vice versa conflicts('elasticsearch-oss') @@ -301,7 +300,7 @@ ospackage { url 'https://www.elastic.co/' // signing setup - if (project.hasProperty('signing.password') && BuildParams.isSnapshotBuild() == false) { + if (project.hasProperty('signing.password') && buildParams.isSnapshotBuild() == false) { signingKeyId = project.hasProperty('signing.keyId') ? project.property('signing.keyId') : 'D88E42B4' signingKeyPassphrase = project.property('signing.password') signingKeyRingFile = project.hasProperty('signing.secretKeyRingFile') ? @@ -335,7 +334,6 @@ Closure commonDebConfig(String architecture) { // versions found on oldest supported distro, centos-6 requires('bash', '4.1', GREATER | EQUAL) - requires('lsb-base', '4', GREATER | EQUAL) requires 'libc6' requires 'adduser' diff --git a/distribution/packages/src/deb/lintian/elasticsearch b/distribution/packages/src/deb/lintian/elasticsearch index edd705b66caaa..1622d8d8aeb40 100644 --- a/distribution/packages/src/deb/lintian/elasticsearch +++ b/distribution/packages/src/deb/lintian/elasticsearch @@ -5,8 +5,6 @@ changelog-file-missing-in-native-package # we intentionally copy our copyright file for all deb packages -copyright-file-contains-full-apache-2-license -copyright-not-using-common-license-for-apache2 copyright-without-copyright-notice # we still put all our files under /usr/share/elasticsearch even after transition to platform dependent packages @@ -16,37 +14,23 @@ arch-dependent-file-in-usr-share missing-dep-on-jarwrapper # we prefer to not make our config and log files world readable -non-standard-file-perm etc/default/elasticsearch 0660 != 0644 -non-standard-dir-perm etc/elasticsearch/ 2750 != 0755 -non-standard-dir-perm etc/elasticsearch/jvm.options.d/ 2750 != 0755 -non-standard-file-perm etc/elasticsearch/* -non-standard-dir-perm var/lib/elasticsearch/ 2750 != 0755 -non-standard-dir-perm var/log/elasticsearch/ 2750 != 0755 - -# this lintian tag is simply wrong; contrary to the explanation, Debian systemd -# does actually look at /usr/lib/systemd/system -systemd-service-file-outside-lib usr/lib/systemd/system/elasticsearch.service +non-standard-file-perm 0660 != 0644 [etc/default/elasticsearch] +non-standard-dir-perm 2750 != 0755 [etc/elasticsearch/] +non-standard-dir-perm 2750 != 0755 [etc/elasticsearch/jvm.options.d/] +non-standard-file-perm 0660 != 0644 [etc/elasticsearch/*] +non-standard-dir-perm 2750 != 0755 [var/lib/elasticsearch/] +non-standard-dir-perm 2750 != 0755 [var/log/elasticsearch/] # the package scripts handle systemd directly and don't need to use deb helpers maintainer-script-calls-systemctl # bundled JDK embedded-library -unstripped-binary-or-object usr/share/elasticsearch/jdk/* -extra-license-file usr/share/elasticsearch/jdk/legal/* -hardening-no-pie usr/share/elasticsearch/jdk/bin/* -hardening-no-pie usr/share/elasticsearch/jdk/lib/* +unstripped-binary-or-object [usr/share/elasticsearch/jdk/*] # the system java version that lintian assumes is far behind what elasticsearch uses unknown-java-class-version -# elastic licensed modules contain elastic license -extra-license-file usr/share/elasticsearch/modules/* - -# This 
dependency appears to have a packaging flaw, and includes a -# generated source file alongside the compiled version -jar-contains-source usr/share/elasticsearch/modules/repository-gcs/api-common*.jar * - # There's no `License` field in Debian control files, but earlier versions # of `lintian` were more permissive. Override this warning so that we can # run `lintian` on different releases of Debian. The format of this override @@ -58,8 +42,27 @@ unknown-field License # indirectly to libc via libdl. This might not be best practice but we # don't build them ourselves and the license precludes us modifying them # to fix this. -library-not-linked-against-libc usr/share/elasticsearch/modules/x-pack-ml/platform/linux-x86_64/lib/libmkl_*.so +library-not-linked-against-libc [usr/share/elasticsearch/modules/x-pack-ml/platform/linux-x86_64/lib/libmkl_*.so*] + + +# Below is the copy of some of the above rules in format for Lintian versions <= 2.104 (Debian 11) +# Override syntax changes between Lintian versions in a non-backwards compatible way, so we handle it with +# duplication and ignoring some issues in the test code. + + +# we prefer to not make our config and log files world readable +non-standard-file-perm etc/default/elasticsearch 0660 != 0644 +non-standard-dir-perm etc/elasticsearch/ 2750 != 0755 +non-standard-dir-perm etc/elasticsearch/jvm.options.d/ 2750 != 0755 +non-standard-file-perm etc/elasticsearch/* +non-standard-dir-perm var/lib/elasticsearch/ 2750 != 0755 +non-standard-dir-perm var/log/elasticsearch/ 2750 != 0755 -# shared-lib-without-dependency-information (now shared-library-lacks-prerequisites) is falsely reported for libvec.so -# which has no dependencies (not even libc) besides the symbols in the base executable. -shared-lib-without-dependency-information usr/share/elasticsearch/lib/platform/linux-x64/libvec.so +# bundled JDK +unstripped-binary-or-object usr/share/elasticsearch/jdk/* + +# Intel MKL libraries are not linked directly to libc. They are linked +# indirectly to libc via libdl. This might not be best practice but we +# don't build them ourselves and the license precludes us modifying them +# to fix this. +library-not-linked-against-libc usr/share/elasticsearch/modules/x-pack-ml/platform/linux-x86_64/lib/libmkl_*.so* diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index ac8ade89c9014..57750f2162a71 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -29,7 +29,7 @@ dependencies { implementation 'org.ow2.asm:asm-tree:9.7' api "org.bouncycastle:bcpg-fips:1.0.7.1" - api "org.bouncycastle:bc-fips:1.0.2.4" + api "org.bouncycastle:bc-fips:1.0.2.5" testImplementation project(":test:framework") testImplementation "com.google.jimfs:jimfs:${versions.jimfs}" testRuntimeOnly "com.google.guava:guava:${versions.jimfs_guava}" diff --git a/docs/build.gradle b/docs/build.gradle index e495ecacce27b..dec0de8ffa844 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -1,5 +1,4 @@ import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.doc.DocSnippetTask import static org.elasticsearch.gradle.testclusters.TestDistribution.DEFAULT @@ -29,7 +28,7 @@ ext.docsFileTree = fileTree(projectDir) { // These files simply don't pass yet. We should figure out how to fix them. 
exclude 'reference/watcher/reference/actions.asciidoc' exclude 'reference/rest-api/security/ssl.asciidoc' - if (BuildParams.inFipsJvm) { + if (buildParams.inFipsJvm) { // We don't support this component in FIPS 140 exclude 'reference/ingest/processors/attachment.asciidoc' // We can't conditionally control output, this would be missing the ingest-attachment component @@ -38,7 +37,7 @@ ext.docsFileTree = fileTree(projectDir) { } tasks.named("yamlRestTest") { - if (BuildParams.isSnapshotBuild() == false) { + if (buildParams.isSnapshotBuild() == false) { // LOOKUP is not available in snapshots systemProperty 'tests.rest.blacklist', [ "reference/esql/processing-commands/lookup/esql-lookup-example" @@ -83,7 +82,7 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach { setting 'xpack.license.self_generated.type', 'trial' setting 'indices.lifecycle.history_index_enabled', 'false' keystorePassword 'keystore-password' - if (BuildParams.isSnapshotBuild() == false) { + if (buildParams.isSnapshotBuild() == false) { requiresFeature 'es.failure_store_feature_flag_enabled', new Version(8, 12, 0) } } @@ -170,7 +169,7 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach { return } // Do not install ingest-attachment in a FIPS 140 JVM as this is not supported - if (subproj.path.startsWith(':modules:ingest-attachment') && BuildParams.inFipsJvm) { + if (subproj.path.startsWith(':modules:ingest-attachment') && buildParams.inFipsJvm) { return } plugin subproj.path diff --git a/docs/changelog/104125.yaml b/docs/changelog/104125.yaml new file mode 100644 index 0000000000000..e5c5ea6a3f1cd --- /dev/null +++ b/docs/changelog/104125.yaml @@ -0,0 +1,18 @@ +pr: 104125 +summary: Disable machine learning on macOS x86_64 +area: Machine Learning +type: breaking +issues: [] +breaking: + title: Disable machine learning on macOS x86_64 + area: Packaging + details: The machine learning plugin is permanently disabled on macOS x86_64. + For the last three years Apple has been selling hardware based on the arm64 + architecture, and support will increasingly focus on this architecture in + the future. Changes to upstream dependencies of Elastic's machine learning + functionality have made it unviable for Elastic to continue to build machine + learning on macOS x86_64. + impact: To continue to use machine learning functionality on macOS please switch to + an arm64 machine (Apple silicon). Alternatively, it will still be possible to run + Elasticsearch with machine learning enabled in a Docker container on macOS x86_64. 
+ notable: false diff --git a/docs/changelog/111494.yaml b/docs/changelog/111494.yaml new file mode 100644 index 0000000000000..6c7b84bb04798 --- /dev/null +++ b/docs/changelog/111494.yaml @@ -0,0 +1,5 @@ +pr: 111494 +summary: Extensible Completion Postings Formats +area: "Suggesters" +type: enhancement +issues: [] diff --git a/docs/changelog/112989.yaml b/docs/changelog/112989.yaml new file mode 100644 index 0000000000000..364f012f94420 --- /dev/null +++ b/docs/changelog/112989.yaml @@ -0,0 +1,5 @@ +pr: 112989 +summary: Upgrade Bouncy Castle FIPS dependencies +area: Security +type: upgrade +issues: [] diff --git a/docs/changelog/113120.yaml b/docs/changelog/113120.yaml new file mode 100644 index 0000000000000..801167d61c19c --- /dev/null +++ b/docs/changelog/113120.yaml @@ -0,0 +1,5 @@ +pr: 113120 +summary: ESQL - enabling scoring with METADATA `_score` +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/114193.yaml b/docs/changelog/114193.yaml deleted file mode 100644 index f18f9359007b8..0000000000000 --- a/docs/changelog/114193.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114193 -summary: Add postal_code support to the City and Enterprise databases -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/114202.yaml b/docs/changelog/114202.yaml new file mode 100644 index 0000000000000..50313b8938aa9 --- /dev/null +++ b/docs/changelog/114202.yaml @@ -0,0 +1,14 @@ +pr: 114202 +summary: Remove deprecated `xpack.searchable.snapshot.allocate_on_rolling_restart` setting +area: Snapshot/Restore +type: breaking +issues: [] +breaking: + title: Remove deprecated `xpack.searchable.snapshot.allocate_on_rolling_restart` setting + area: 'Cluster and node setting' + details: >- + The `xpack.searchable.snapshot.allocate_on_rolling_restart` setting was created as an escape-hatch just in case + relying on the `cluster.routing.allocation.enable=primaries` setting for allocating searchable snapshots during + rolling restarts had some unintended side-effects. It has been deprecated since 8.2.0. + impact: Remove `xpack.searchable.snapshot.allocate_on_rolling_restart` from your settings if present. 
+ notable: false diff --git a/docs/changelog/114227.yaml b/docs/changelog/114227.yaml deleted file mode 100644 index 9b508f07c9e5a..0000000000000 --- a/docs/changelog/114227.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114227 -summary: Ignore conflicting fields during dynamic mapping update -area: Mapping -type: bug -issues: - - 114228 diff --git a/docs/changelog/114268.yaml b/docs/changelog/114268.yaml deleted file mode 100644 index 5e4457005d7d3..0000000000000 --- a/docs/changelog/114268.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114268 -summary: Support more maxmind fields in the geoip processor -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/114317.yaml b/docs/changelog/114317.yaml new file mode 100644 index 0000000000000..9c73fe513e197 --- /dev/null +++ b/docs/changelog/114317.yaml @@ -0,0 +1,5 @@ +pr: 114317 +summary: "ESQL: CATEGORIZE as a `BlockHash`" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/114521.yaml b/docs/changelog/114521.yaml deleted file mode 100644 index c3a9c7cdd0848..0000000000000 --- a/docs/changelog/114521.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114521 -summary: Add support for registered country fields for maxmind geoip databases -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/114548.yaml b/docs/changelog/114548.yaml deleted file mode 100644 index b9692bcb2d10c..0000000000000 --- a/docs/changelog/114548.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114548 -summary: Support IPinfo database configurations -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/114914.yaml b/docs/changelog/114914.yaml new file mode 100644 index 0000000000000..bad13e26682dc --- /dev/null +++ b/docs/changelog/114914.yaml @@ -0,0 +1,5 @@ +pr: 114914 +summary: Adding chunking settings to `IbmWatsonxService` +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/115020.yaml b/docs/changelog/115020.yaml new file mode 100644 index 0000000000000..2b0aefafea507 --- /dev/null +++ b/docs/changelog/115020.yaml @@ -0,0 +1,5 @@ +pr: 115020 +summary: Adding endpoint creation validation for all task types to remaining services +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/115091.yaml b/docs/changelog/115091.yaml new file mode 100644 index 0000000000000..762bcca5e8c52 --- /dev/null +++ b/docs/changelog/115091.yaml @@ -0,0 +1,7 @@ +pr: 115091 +summary: Added stricter range type checks and runtime warnings for ENRICH +area: ES|QL +type: bug +issues: + - 107357 + - 116799 diff --git a/docs/changelog/115142.yaml b/docs/changelog/115142.yaml new file mode 100644 index 0000000000000..2af968ae156da --- /dev/null +++ b/docs/changelog/115142.yaml @@ -0,0 +1,6 @@ +pr: 115142 +summary: Attempt to clean up index before remote transfer +area: Recovery +type: enhancement +issues: + - 104473 diff --git a/docs/changelog/115585.yaml b/docs/changelog/115585.yaml new file mode 100644 index 0000000000000..02eecfc3d7d2b --- /dev/null +++ b/docs/changelog/115585.yaml @@ -0,0 +1,6 @@ +pr: 115459 +summary: Adds access to flags no_sub_matches and no_overlapping_matches to hyphenation-decompounder-tokenfilter +area: Search +type: enhancement +issues: + - 97849 diff --git a/docs/changelog/115616.yaml b/docs/changelog/115616.yaml new file mode 100644 index 0000000000000..4fb4dc18538de --- /dev/null +++ b/docs/changelog/115616.yaml @@ -0,0 +1,6 @@ +pr: 115616 +summary: Fix double lookup failure on ESQL +area: ES|QL +type: bug +issues: + - 111398 diff --git 
a/docs/changelog/115678.yaml b/docs/changelog/115678.yaml new file mode 100644 index 0000000000000..31240eae1ebb4 --- /dev/null +++ b/docs/changelog/115678.yaml @@ -0,0 +1,5 @@ +pr: 115678 +summary: "ESQL: extract common filter from aggs" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/115814.yaml b/docs/changelog/115814.yaml new file mode 100644 index 0000000000000..34f1213272d6f --- /dev/null +++ b/docs/changelog/115814.yaml @@ -0,0 +1,6 @@ +pr: 115814 +summary: "[ES|QL] Implicit casting string literal to intervals" +area: ES|QL +type: enhancement +issues: + - 115352 diff --git a/docs/changelog/115930.yaml b/docs/changelog/115930.yaml new file mode 100644 index 0000000000000..788a01b5cac96 --- /dev/null +++ b/docs/changelog/115930.yaml @@ -0,0 +1,5 @@ +pr: 115930 +summary: Inconsistency in the _analyzer api when the index is not included +area: Search +type: bug +issues: [] diff --git a/docs/changelog/116115.yaml b/docs/changelog/116115.yaml new file mode 100644 index 0000000000000..33e1735c20ca4 --- /dev/null +++ b/docs/changelog/116115.yaml @@ -0,0 +1,5 @@ +pr: 116115 +summary: Allow http unsafe buffers by default +area: Network +type: enhancement +issues: [] diff --git a/docs/changelog/116292.yaml b/docs/changelog/116292.yaml deleted file mode 100644 index f741c67bea155..0000000000000 --- a/docs/changelog/116292.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 116292 -summary: Add missing header in `put_data_lifecycle` rest-api-spec -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/116348.yaml b/docs/changelog/116348.yaml new file mode 100644 index 0000000000000..927ffc5a6121d --- /dev/null +++ b/docs/changelog/116348.yaml @@ -0,0 +1,5 @@ +pr: 116348 +summary: "ESQL: Honor skip_unavailable setting for nonmatching indices errors at planning time" +area: ES|QL +type: enhancement +issues: [ 114531 ] diff --git a/docs/changelog/116357.yaml b/docs/changelog/116357.yaml deleted file mode 100644 index a1a7831eab9ca..0000000000000 --- a/docs/changelog/116357.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 116357 -summary: Add tracking for query rule types -area: Relevance -type: enhancement -issues: [] diff --git a/docs/changelog/116358.yaml b/docs/changelog/116358.yaml new file mode 100644 index 0000000000000..58b44a1e9bcf5 --- /dev/null +++ b/docs/changelog/116358.yaml @@ -0,0 +1,5 @@ +pr: 116358 +summary: Update Deberta tokenizer +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/116382.yaml b/docs/changelog/116382.yaml deleted file mode 100644 index c941fb6eaa1e4..0000000000000 --- a/docs/changelog/116382.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 116382 -summary: Validate missing shards after the coordinator rewrite -area: Search -type: bug -issues: [] diff --git a/docs/changelog/116408.yaml b/docs/changelog/116408.yaml deleted file mode 100644 index 5f4c8459778a6..0000000000000 --- a/docs/changelog/116408.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 116408 -summary: Propagating nested `inner_hits` to the parent compound retriever -area: Ranking -type: bug -issues: - - 116397 diff --git a/docs/changelog/116478.yaml b/docs/changelog/116478.yaml deleted file mode 100644 index ec50799eb2019..0000000000000 --- a/docs/changelog/116478.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 116478 -summary: Semantic text simple partial update -area: Search -type: bug -issues: [] diff --git a/docs/changelog/116515.yaml b/docs/changelog/116515.yaml new file mode 100644 index 0000000000000..6c0d473361e52 --- /dev/null +++ b/docs/changelog/116515.yaml @@ -0,0 +1,5 @@ 
+pr: 116515 +summary: Esql/lookup join grammar +area: ES|QL +type: feature +issues: [] diff --git a/docs/changelog/116531.yaml b/docs/changelog/116531.yaml new file mode 100644 index 0000000000000..908bbff487973 --- /dev/null +++ b/docs/changelog/116531.yaml @@ -0,0 +1,5 @@ +pr: 116531 +summary: "Add a standard deviation aggregating function: STD_DEV" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/116650.yaml b/docs/changelog/116650.yaml deleted file mode 100644 index d314a918aede9..0000000000000 --- a/docs/changelog/116650.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 116650 -summary: Fix bug in ML autoscaling when some node info is unavailable -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/116689.yaml b/docs/changelog/116689.yaml new file mode 100644 index 0000000000000..0b1d1646868aa --- /dev/null +++ b/docs/changelog/116689.yaml @@ -0,0 +1,10 @@ +pr: 116689 +summary: Deprecate `_source.mode` in mappings +area: Mapping +type: deprecation +issues: [] +deprecation: + title: Deprecate `_source.mode` in mappings + area: Mapping + details: Configuring `_source.mode` in mappings is deprecated and will be removed in future versions. Use `index.mapping.source.mode` index setting instead. + impact: Use `index.mapping.source.mode` index setting instead diff --git a/docs/changelog/116692.yaml b/docs/changelog/116692.yaml new file mode 100644 index 0000000000000..30f9e62095436 --- /dev/null +++ b/docs/changelog/116692.yaml @@ -0,0 +1,5 @@ +pr: 116692 +summary: Remove all mentions of eis and gateway and deprecate flags that do +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/116739.yaml b/docs/changelog/116739.yaml new file mode 100644 index 0000000000000..ea3b1253a9008 --- /dev/null +++ b/docs/changelog/116739.yaml @@ -0,0 +1,5 @@ +pr: 116739 +summary: Change default Docker image to be based on UBI minimal instead of Ubuntu +area: Infra/Core +type: enhancement +issues: [] diff --git a/docs/changelog/116765.yaml b/docs/changelog/116765.yaml new file mode 100644 index 0000000000000..ec2357c17acaf --- /dev/null +++ b/docs/changelog/116765.yaml @@ -0,0 +1,5 @@ +pr: 116765 +summary: Metrics for incremental bulk splits +area: Distributed +type: enhancement +issues: [] diff --git a/docs/changelog/116809.yaml b/docs/changelog/116809.yaml new file mode 100644 index 0000000000000..61dbeb233d576 --- /dev/null +++ b/docs/changelog/116809.yaml @@ -0,0 +1,5 @@ +pr: 116809 +summary: "Distinguish `LicensedFeature` by family field" +area: License +type: bug +issues: [] diff --git a/docs/changelog/116819.yaml b/docs/changelog/116819.yaml new file mode 100644 index 0000000000000..afe06c583fe55 --- /dev/null +++ b/docs/changelog/116819.yaml @@ -0,0 +1,5 @@ +pr: 116819 +summary: ESQL - Add match operator (:) +area: Search +type: feature +issues: [] diff --git a/docs/changelog/116922.yaml b/docs/changelog/116922.yaml new file mode 100644 index 0000000000000..39e63da50ea24 --- /dev/null +++ b/docs/changelog/116922.yaml @@ -0,0 +1,5 @@ +pr: 116922 +summary: Always check if index mode is logsdb +area: Logs +type: bug +issues: [] diff --git a/docs/changelog/116931.yaml b/docs/changelog/116931.yaml new file mode 100644 index 0000000000000..8b31d236ff137 --- /dev/null +++ b/docs/changelog/116931.yaml @@ -0,0 +1,5 @@ +pr: 116931 +summary: Enable built-in Inference Endpoints and default for Semantic Text +area: "Machine Learning" +type: enhancement +issues: [] diff --git a/docs/changelog/116943.yaml b/docs/changelog/116943.yaml new file mode 100644 
index 0000000000000..3fd0793610cdd --- /dev/null +++ b/docs/changelog/116943.yaml @@ -0,0 +1,11 @@ +pr: 116943 +summary: Remove support for deprecated `force_source` highlighting parameter +area: Highlighting +type: breaking +issues: [] +breaking: + title: Remove support for deprecated `force_source` highlighting parameter + area: REST API + details: The deprecated highlighting `force_source` parameter is no longer supported. + impact: Users should remove usages of the `force_source` parameter from their search requests. + notable: false diff --git a/docs/changelog/116944.yaml b/docs/changelog/116944.yaml new file mode 100644 index 0000000000000..e7833e49cf965 --- /dev/null +++ b/docs/changelog/116944.yaml @@ -0,0 +1,11 @@ +pr: 116944 +summary: "Remove support for type, fields, `copy_to` and boost in metadata field definition" +area: Mapping +type: breaking +issues: [] +breaking: + title: "Remove support for type, fields, copy_to and boost in metadata field definition" + area: Mapping + details: The type, fields, copy_to and boost parameters are no longer supported in metadata field definition + impact: Users providing type, fields, copy_to or boost as part of metadata field definition should remove them from their mappings. + notable: false diff --git a/docs/changelog/116957.yaml b/docs/changelog/116957.yaml new file mode 100644 index 0000000000000..1020190de180d --- /dev/null +++ b/docs/changelog/116957.yaml @@ -0,0 +1,5 @@ +pr: 116957 +summary: Propagate scoring function through random sampler +area: Machine Learning +type: bug +issues: [ 110134 ] diff --git a/docs/changelog/116962.yaml b/docs/changelog/116962.yaml new file mode 100644 index 0000000000000..8f16b00e3f9fc --- /dev/null +++ b/docs/changelog/116962.yaml @@ -0,0 +1,5 @@ +pr: 116962 +summary: "Add special case for elastic reranker in inference API" +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/116970.yaml b/docs/changelog/116970.yaml new file mode 100644 index 0000000000000..66de673dfb53c --- /dev/null +++ b/docs/changelog/116970.yaml @@ -0,0 +1,11 @@ +pr: 116970 +summary: Remove legacy params from range query +area: Search +type: breaking +issues: [] +breaking: + title: Remove legacy params from range query + area: REST API + details: The deprecated range query parameters `to`, `from`, `include_lower`, and `include_upper` are no longer supported. + impact: Users should use `lt`, `lte`, `gt`, and `gte` query parameters instead. 
+ notable: false diff --git a/docs/changelog/116980.yaml b/docs/changelog/116980.yaml new file mode 100644 index 0000000000000..140324fd40b92 --- /dev/null +++ b/docs/changelog/116980.yaml @@ -0,0 +1,6 @@ +pr: 116980 +summary: "ESQL: Fix sorts containing `_source`" +area: ES|QL +type: bug +issues: + - 116659 diff --git a/docs/changelog/117080.yaml b/docs/changelog/117080.yaml new file mode 100644 index 0000000000000..5909f966e0fa2 --- /dev/null +++ b/docs/changelog/117080.yaml @@ -0,0 +1,5 @@ +pr: 117080 +summary: Esql Enable Date Nanos (tech preview) +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/117095.yaml b/docs/changelog/117095.yaml new file mode 100644 index 0000000000000..27460924ecb71 --- /dev/null +++ b/docs/changelog/117095.yaml @@ -0,0 +1,5 @@ +pr: 117095 +summary: Add version prefix to Inference Service API path +area: Inference +type: enhancement +issues: [] diff --git a/docs/changelog/117105.yaml b/docs/changelog/117105.yaml new file mode 100644 index 0000000000000..de56c4d521a62 --- /dev/null +++ b/docs/changelog/117105.yaml @@ -0,0 +1,6 @@ +pr: 117105 +summary: Fix long metric deserialize & add - auto-resize needs to be set manually +area: CCS +type: bug +issues: + - 116914 diff --git a/docs/changelog/117148.yaml b/docs/changelog/117148.yaml new file mode 100644 index 0000000000000..92dd69672616a --- /dev/null +++ b/docs/changelog/117148.yaml @@ -0,0 +1,5 @@ +pr: 117148 +summary: Preserve thread context when waiting for segment generation in RTG +area: CRUD +type: bug +issues: [] diff --git a/docs/changelog/117153.yaml b/docs/changelog/117153.yaml new file mode 100644 index 0000000000000..f7640c0a7ed6a --- /dev/null +++ b/docs/changelog/117153.yaml @@ -0,0 +1,5 @@ +pr: 117153 +summary: "ESQL: fix the column position in errors" +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/117189.yaml b/docs/changelog/117189.yaml new file mode 100644 index 0000000000000..e89c2d81506d9 --- /dev/null +++ b/docs/changelog/117189.yaml @@ -0,0 +1,5 @@ +pr: 117189 +summary: Fix deberta tokenizer bug caused by bug in normalizer +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/117201.yaml b/docs/changelog/117201.yaml new file mode 100644 index 0000000000000..f8a2be35c70a3 --- /dev/null +++ b/docs/changelog/117201.yaml @@ -0,0 +1,6 @@ +pr: 117201 +summary: "Use `field_caps` native nested fields filtering" +area: ES|QL +type: bug +issues: + - 117054 diff --git a/docs/changelog/117243.yaml b/docs/changelog/117243.yaml new file mode 100644 index 0000000000000..f871d476bd0ec --- /dev/null +++ b/docs/changelog/117243.yaml @@ -0,0 +1,5 @@ +pr: 117243 +summary: Bump major version for feature migration system indices +area: Infra/Core +type: upgrade +issues: [] diff --git a/docs/changelog/117246.yaml b/docs/changelog/117246.yaml new file mode 100644 index 0000000000000..29c4464855967 --- /dev/null +++ b/docs/changelog/117246.yaml @@ -0,0 +1,5 @@ +pr: 117246 +summary: LOOKUP JOIN using field-caps for field mapping +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/117265.yaml b/docs/changelog/117265.yaml new file mode 100644 index 0000000000000..ec6605155538d --- /dev/null +++ b/docs/changelog/117265.yaml @@ -0,0 +1,5 @@ +pr: 117265 +summary: Async search responses have CCS metadata while searches are running +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/117287.yaml b/docs/changelog/117287.yaml new file mode 100644 index 0000000000000..08da9dd8087b2 --- /dev/null +++ b/docs/changelog/117287.yaml 
@@ -0,0 +1,5 @@ +pr: 117287 +summary: Fixing bug setting index when parsing Google Vertex AI results +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/117294.yaml b/docs/changelog/117294.yaml new file mode 100644 index 0000000000000..f6e80690de7ff --- /dev/null +++ b/docs/changelog/117294.yaml @@ -0,0 +1,5 @@ +pr: 117294 +summary: Always Emit Inference ID in Semantic Text Mapping +area: Mapping +type: bug +issues: [] diff --git a/docs/changelog/117297.yaml b/docs/changelog/117297.yaml new file mode 100644 index 0000000000000..4a0051bbae644 --- /dev/null +++ b/docs/changelog/117297.yaml @@ -0,0 +1,5 @@ +pr: 117297 +summary: Fix CCS exchange when multi cluster aliases point to same cluster +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/117303.yaml b/docs/changelog/117303.yaml new file mode 100644 index 0000000000000..71d134f2cd077 --- /dev/null +++ b/docs/changelog/117303.yaml @@ -0,0 +1,5 @@ +pr: 117303 +summary: Remove HTTP content copies +area: Network +type: enhancement +issues: [] diff --git a/docs/changelog/117312.yaml b/docs/changelog/117312.yaml new file mode 100644 index 0000000000000..302b91388ef2b --- /dev/null +++ b/docs/changelog/117312.yaml @@ -0,0 +1,5 @@ +pr: 117312 +summary: Add missing `async_search` query parameters to rest-api-spec +area: Search +type: bug +issues: [] diff --git a/docs/changelog/117316.yaml b/docs/changelog/117316.yaml new file mode 100644 index 0000000000000..69474d68a8190 --- /dev/null +++ b/docs/changelog/117316.yaml @@ -0,0 +1,5 @@ +pr: 117316 +summary: Fix validation of SORT by aggregate functions +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/117350.yaml b/docs/changelog/117350.yaml new file mode 100644 index 0000000000000..dca54f2037a87 --- /dev/null +++ b/docs/changelog/117350.yaml @@ -0,0 +1,5 @@ +pr: 117350 +summary: "Improve halfbyte transposition performance, marginally improving bbq performance" +area: Vector Search +type: enhancement +issues: [] diff --git a/docs/changelog/117404.yaml b/docs/changelog/117404.yaml new file mode 100644 index 0000000000000..0bab171956ca9 --- /dev/null +++ b/docs/changelog/117404.yaml @@ -0,0 +1,5 @@ +pr: 117404 +summary: Correct bit * byte and bit * float script comparisons +area: Vector Search +type: bug +issues: [] diff --git a/docs/changelog/117503.yaml b/docs/changelog/117503.yaml new file mode 100644 index 0000000000000..d48741262b581 --- /dev/null +++ b/docs/changelog/117503.yaml @@ -0,0 +1,6 @@ +pr: 117503 +summary: Fix COUNT filter pushdown +area: ES|QL +type: bug +issues: + - 115522 diff --git a/docs/changelog/117551.yaml b/docs/changelog/117551.yaml new file mode 100644 index 0000000000000..081dd9203d82a --- /dev/null +++ b/docs/changelog/117551.yaml @@ -0,0 +1,5 @@ +pr: 117551 +summary: Fix stats by constant expression with alias +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/117595.yaml b/docs/changelog/117595.yaml new file mode 100644 index 0000000000000..9360c372ac97e --- /dev/null +++ b/docs/changelog/117595.yaml @@ -0,0 +1,5 @@ +pr: 117595 +summary: Fix for Deberta tokenizer when input sequence exceeds 512 tokens +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/117606.yaml b/docs/changelog/117606.yaml new file mode 100644 index 0000000000000..ea61099a1a6b4 --- /dev/null +++ b/docs/changelog/117606.yaml @@ -0,0 +1,5 @@ +pr: 117606 +summary: Remove deprecated sort from reindex operation within dataframe analytics procedure +area: Machine Learning +type: enhancement +issues: [] diff --git 
a/docs/changelog/117618.yaml b/docs/changelog/117618.yaml new file mode 100644 index 0000000000000..5de29e2fe768c --- /dev/null +++ b/docs/changelog/117618.yaml @@ -0,0 +1,5 @@ +pr: 117618 +summary: SearchStatesIt failures reported by CI +area: Search +type: bug +issues: [116617, 116618] diff --git a/docs/changelog/117655.yaml b/docs/changelog/117655.yaml new file mode 100644 index 0000000000000..f2afd3570f104 --- /dev/null +++ b/docs/changelog/117655.yaml @@ -0,0 +1,5 @@ +pr: 117655 +summary: Add nulls support to Categorize +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/117748.yaml b/docs/changelog/117748.yaml new file mode 100644 index 0000000000000..615adbae07ad7 --- /dev/null +++ b/docs/changelog/117748.yaml @@ -0,0 +1,6 @@ +pr: 117748 +summary: Add IMDSv2 support to `repository-s3` +area: Snapshot/Restore +type: enhancement +issues: + - 105135 diff --git a/docs/changelog/117750.yaml b/docs/changelog/117750.yaml new file mode 100644 index 0000000000000..3ba3f1693f4df --- /dev/null +++ b/docs/changelog/117750.yaml @@ -0,0 +1,6 @@ +pr: 117750 +summary: '`CrossClusterIT` `testCancel` failure' +area: Search +type: bug +issues: + - 108061 diff --git a/docs/changelog/90529.yaml b/docs/changelog/90529.yaml new file mode 100644 index 0000000000000..a014c82259a9e --- /dev/null +++ b/docs/changelog/90529.yaml @@ -0,0 +1,26 @@ +pr: 90529 +summary: Output a consistent format when generating error json +area: Infra/REST API +type: "breaking" +issues: + - 89387 +breaking: + title: Error JSON structure has changed when detailed errors are disabled + area: REST API + details: |- + This change modifies the JSON format of error messages returned to REST clients + when detailed messages are turned off. + Previously, JSON returned when an exception occurred, and `http.detailed_errors.enabled: false` was set, + just consisted of a single `"error"` text field with some basic information. + Setting `http.detailed_errors.enabled: true` (the default) changed this field + to an object with more detailed information. + With this change, non-detailed errors now have the same structure as detailed errors. `"error"` will now always + be an object with, at a minimum, a `"type"` and `"reason"` field. Additional fields are included when detailed + errors are enabled. + To use the previous structure for non-detailed errors, use the v8 REST API. + impact: |- + If you have set `http.detailed_errors.enabled: false` (the default is `true`) + the structure of JSON when any exceptions occur now matches the structure when + detailed errors are enabled. + To use the previous structure for non-detailed errors, use the v8 REST API. + notable: false diff --git a/docs/reference/analysis/tokenfilters/hyphenation-decompounder-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/hyphenation-decompounder-tokenfilter.asciidoc index eed66d81e9132..1bd36f801aa17 100644 --- a/docs/reference/analysis/tokenfilters/hyphenation-decompounder-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/hyphenation-decompounder-tokenfilter.asciidoc @@ -111,6 +111,18 @@ output. Defaults to `5`. (Optional, Boolean) If `true`, only include the longest matching subword. Defaults to `false`. +`no_sub_matches`:: +(Optional, Boolean) +If `true`, do not match sub tokens in tokens that are in the word list. +Defaults to `false`. + +`no_overlapping_matches`:: +(Optional, Boolean) +If `true`, do not allow overlapping tokens. +Defaults to `false`. 
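Editorial aside: to make the two flags just defined concrete, here is a minimal sketch of a custom analyzer that sets one of them. The index name, word list, and patterns path are illustrative assumptions, not part of this change; the filter type and its pre-existing parameters follow the hyphenation decompounder documentation.

[source,console]
----
PUT /decompound_example
{
  "settings": {
    "analysis": {
      "analyzer": {
        "my_analyzer": {
          "tokenizer": "standard",
          "filter": [ "my_decompounder" ]
        }
      },
      "filter": {
        "my_decompounder": {
          "type": "hyphenation_decompounder",
          "hyphenation_patterns_path": "analysis/hyphenation_patterns.xml",
          "word_list": [ "Kaffee", "zucker", "tasse" ],
          "no_sub_matches": true
        }
      }
    }
  }
}
----

Only one flag is set here because, as the guidance below explains, enabling a more restrictive flag makes the less restrictive ones irrelevant.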
+ +Typically, users will only want to enable one of these three flags, since `no_overlapping_matches` is the most restrictive and `no_sub_matches` is more restrictive than `only_longest_match`. When a more restrictive option is enabled, the state of the less restrictive flags has no effect. + [[analysis-hyp-decomp-tokenfilter-customize]] ==== Customize and add to an analyzer diff --git a/docs/reference/cluster/allocation-explain.asciidoc b/docs/reference/cluster/allocation-explain.asciidoc index bbbea192f0f86..e640fa77c71ee 100644 --- a/docs/reference/cluster/allocation-explain.asciidoc +++ b/docs/reference/cluster/allocation-explain.asciidoc @@ -159,6 +159,8 @@ node. <5> The decider which led to the `no` decision for the node. <6> An explanation as to why the decider returned a `no` decision, with a helpful hint pointing to the setting that led to the decision. In this example, a newly created index has <> that requires that it only be allocated to a node named `nonexistent_node`, which does not exist, so the index is unable to allocate. +See https://www.youtube.com/watch?v=5z3n2VgusLE[this video] for a walkthrough of troubleshooting a node and index setting mismatch. + [[maximum-number-of-retries-exceeded]] ====== Maximum number of retries exceeded @@ -235,7 +237,9 @@ primary shard that was previously allocated. ---- // NOTCONSOLE -TIP: If a shard is unassigned with an allocation status of `no_valid_shard_copy`, then you should <>. If all the nodes containing in-sync copies of a shard are lost, then you can <>. +If a shard is unassigned with an allocation status of `no_valid_shard_copy`, then you should <>. If all the nodes containing in-sync copies of a shard are lost, then you can <>. + +See https://www.youtube.com/watch?v=6OAg9IyXFO4[this video] for a walkthrough of troubleshooting `no_valid_shard_copy`. ===== Unassigned replica shard diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index bd818a538f78b..d875417bde51a 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -1644,7 +1644,10 @@ The API returns the following response: "total_deduplicated_mapping_size": "0b", "total_deduplicated_mapping_size_in_bytes": 0, "field_types": [], - "runtime_field_types": [] + "runtime_field_types": [], + "source_modes" : { + "stored": 0 + } }, "analysis": { "char_filter_types": [], diff --git a/docs/reference/cluster/update-settings.asciidoc b/docs/reference/cluster/update-settings.asciidoc index ca3d100e31e06..3d8bdcca07e2b 100644 --- a/docs/reference/cluster/update-settings.asciidoc +++ b/docs/reference/cluster/update-settings.asciidoc @@ -59,8 +59,8 @@ An example of a transient update: ==== We no longer recommend using transient cluster settings. Use persistent cluster settings instead. If a cluster becomes unstable, transient settings can clear -unexpectedly, resulting in a potentially undesired cluster configuration. +// See the <>. 
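Editorial aside: since the update-settings hunk above steers readers away from transient settings, a persistent update looks like the following sketch. The specific setting and value are illustrative only, not part of this change.

[source,console]
----
PUT /_cluster/settings
{
  "persistent": {
    "indices.recovery.max_bytes_per_sec": "50mb"
  }
}
----

Persistent settings survive full cluster restarts, which is why they are the recommended default here.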
==== // end::transient-settings-warning[] diff --git a/docs/reference/connector/docs/connectors-content-syncs.asciidoc b/docs/reference/connector/docs/connectors-content-syncs.asciidoc index f1745382677a2..0a2eb54047170 100644 --- a/docs/reference/connector/docs/connectors-content-syncs.asciidoc +++ b/docs/reference/connector/docs/connectors-content-syncs.asciidoc @@ -52,7 +52,7 @@ However, a fast, accessible third-party data source that stores huge amounts of [NOTE] ==== -Incremental syncs for the SharePoint Online connector use specific logic. +Incremental syncs for <> and <> connectors use specific logic. All other connectors use the same shared connector framework logic for incremental syncs. ==== diff --git a/docs/reference/connector/docs/sync-rules.asciidoc b/docs/reference/connector/docs/sync-rules.asciidoc index 9b2a77be7db03..3ab72093666b8 100644 --- a/docs/reference/connector/docs/sync-rules.asciidoc +++ b/docs/reference/connector/docs/sync-rules.asciidoc @@ -116,6 +116,12 @@ A "match" is determined based on a condition defined by a combination of "field" The `Field` column should be used to define which field on a given document should be considered. +[NOTE] +==== +Only top-level fields are supported. +Nested/object fields cannot be referenced with "dot notation". +==== + The following rules are available in the `Rule` column: * `equals` - The field value is equal to the specified value. diff --git a/docs/reference/data-streams/tsds-reindex.asciidoc b/docs/reference/data-streams/tsds-reindex.asciidoc index 9d6594db4e779..f4d00f33c179c 100644 --- a/docs/reference/data-streams/tsds-reindex.asciidoc +++ b/docs/reference/data-streams/tsds-reindex.asciidoc @@ -202,7 +202,7 @@ POST /_component_template/destination_template POST /_index_template/2 { "index_patterns": [ - "k8s*" + "k9s*" ], "composed_of": [ "destination_template" diff --git a/docs/reference/data-streams/tsds.asciidoc b/docs/reference/data-streams/tsds.asciidoc index 461c0a1272e96..d0d6d4a455c63 100644 --- a/docs/reference/data-streams/tsds.asciidoc +++ b/docs/reference/data-streams/tsds.asciidoc @@ -339,4 +339,5 @@ include::tsds-index-settings.asciidoc[] include::downsampling.asciidoc[] include::downsampling-ilm.asciidoc[] include::downsampling-manual.asciidoc[] +include::downsampling-dsl.asciidoc[] include::tsds-reindex.asciidoc[] diff --git a/docs/reference/esql/esql-commands.asciidoc b/docs/reference/esql/esql-commands.asciidoc index 235113ac1394a..33e748d7eb7c1 100644 --- a/docs/reference/esql/esql-commands.asciidoc +++ b/docs/reference/esql/esql-commands.asciidoc @@ -38,12 +38,12 @@ image::images/esql/processing-command.svg[A processing command changing an input * <> * <> ifeval::["{release-state}"=="unreleased"] -* experimental:[] <> +//* experimental:[] <> endif::[] * <> * <> ifeval::["{release-state}"=="unreleased"] -* experimental:[] <> +//* experimental:[] <> endif::[] * experimental:[] <> * <> @@ -63,12 +63,12 @@ include::processing-commands/enrich.asciidoc[] include::processing-commands/eval.asciidoc[] include::processing-commands/grok.asciidoc[] ifeval::["{release-state}"=="unreleased"] -include::processing-commands/inlinestats.asciidoc[] +//include::processing-commands/inlinestats.asciidoc[] endif::[] include::processing-commands/keep.asciidoc[] include::processing-commands/limit.asciidoc[] ifeval::["{release-state}"=="unreleased"] -include::processing-commands/lookup.asciidoc[] +//include::processing-commands/lookup.asciidoc[] endif::[] include::processing-commands/mv_expand.asciidoc[] 
include::processing-commands/rename.asciidoc[] diff --git a/docs/reference/esql/esql-enrich-data.asciidoc b/docs/reference/esql/esql-enrich-data.asciidoc index c48118d1c367a..ad34e29f1a55b 100644 --- a/docs/reference/esql/esql-enrich-data.asciidoc +++ b/docs/reference/esql/esql-enrich-data.asciidoc @@ -138,8 +138,33 @@ include::{es-ref-dir}/ingest/apis/enrich/execute-enrich-policy.asciidoc[tag=upda include::../ingest/enrich.asciidoc[tag=update-enrich-policy] -==== Limitations +==== Enrich Policy Types and Limitations +The {esql} `ENRICH` command supports all three enrich policy types: + +`geo_match`:: +Matches enrich data to incoming documents based on a <>. +For an example, see <>. + +`match`:: +Matches enrich data to incoming documents based on a <>. +For an example, see <>. + +`range`:: +Matches a number, date, or IP address in incoming documents to a range in the +enrich index based on a <>. For an example, +see <>. + // tag::limitations[] -The {esql} `ENRICH` command only supports enrich policies of type `match`. -Furthermore, `ENRICH` only supports enriching on a column of type `keyword`. +While all three enrich policy types are supported, there are some limitations to be aware of: + +* The `geo_match` enrich policy type only supports the `intersects` spatial relation. +* The `match_field` in the `ENRICH` command must be of the correct type. +For example, if the enrich policy is of type `geo_match`, the `match_field` in the `ENRICH` +command must be of type `geo_point` or `geo_shape`. +Likewise, a `range` enrich policy requires a `match_field` of type `integer`, `long`, `date`, or `ip`, +depending on the type of the range field in the original enrich index. +* However, this constraint is relaxed for `range` policies when the `match_field` is of type `KEYWORD`. +In this case, the field values will be parsed during query execution, row by row. +If any value fails to parse, the output values for that row will be set to `null`, +an appropriate warning will be produced, and the query will continue to execute. // end::limitations[] diff --git a/docs/reference/esql/esql-kibana.asciidoc b/docs/reference/esql/esql-kibana.asciidoc index 85969e19957af..87dd4d87fa8e3 100644 --- a/docs/reference/esql/esql-kibana.asciidoc +++ b/docs/reference/esql/esql-kibana.asciidoc @@ -106,12 +106,27 @@ detailed warning, expand the query bar, and click *warnings*. ==== Query history You can reuse your recent {esql} queries in the query bar. -In the query bar click *Show recent queries*. +In the query bar, click *Show recent queries*. You can then scroll through your recent queries: image::images/esql/esql-discover-query-history.png[align="center",size="50%"] +[discrete] +[[esql-kibana-starred-queries]] +==== Starred queries + +From the query history, you can mark some queries as favorites to find and access them faster later. + +In the query bar, click *Show recent queries*. + +From the **Recent** tab, you can star any queries you want. + +In the **Starred** tab, find all the queries you have previously starred. 
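Editorial aside: to ground the `ENRICH` policy-type rules documented above, here is a minimal sketch of a `match`-type enrichment. The policy and column names (`languages_policy`, `language_code`, `language_name`) are assumptions borrowed from the standard {esql} enrich walkthrough, not something this diff adds.

[source,esql]
----
// languages_policy is assumed to be a match-type enrich policy keyed on language_code
ROW language_code = "1"
| ENRICH languages_policy ON language_code WITH language_name
----

For a `range` policy, `ON` would instead reference an `integer`, `long`, `date`, or `ip` column, or a `keyword` column whose values are parsed row by row as described in the limitations bullets above.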
+ +image::images/esql/esql-discover-query-starred.png[align="center",size="50%"] + + [discrete] [[esql-kibana-results-table]] === The results table diff --git a/docs/reference/esql/esql-language.asciidoc b/docs/reference/esql/esql-language.asciidoc index a7c0e5e01a867..151ca803bf2eb 100644 --- a/docs/reference/esql/esql-language.asciidoc +++ b/docs/reference/esql/esql-language.asciidoc @@ -14,6 +14,7 @@ Detailed reference documentation for the {esql} language: * <> * <> * <> +* <> include::esql-syntax.asciidoc[] include::esql-commands.asciidoc[] @@ -23,3 +24,4 @@ include::multivalued-fields.asciidoc[] include::esql-process-data-with-dissect-grok.asciidoc[] include::esql-enrich-data.asciidoc[] include::implicit-casting.asciidoc[] +include::time-spans.asciidoc[] diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc index 1772e956bd9e2..c2849e4889f98 100644 --- a/docs/reference/esql/esql-limitations.asciidoc +++ b/docs/reference/esql/esql-limitations.asciidoc @@ -25,6 +25,9 @@ include::processing-commands/limit.asciidoc[tag=limitation] * `alias` * `boolean` * `date` +* `date_nanos` (Tech Preview) +** The following functions don't yet support date nanos: `bucket`, `date_format`, `date_parse`, `date_diff`, `date_extract` +** To use these functions, you can cast to millisecond dates with `to_datetime` * `double` (`float`, `half_float`, `scaled_float` are represented as `double`) * `ip` * `keyword` family including `keyword`, `constant_keyword`, and `wildcard` @@ -50,7 +53,6 @@ include::processing-commands/limit.asciidoc[tag=limitation] ** `position` ** `aggregate_metric_double` * Date/time -** `date_nanos` ** `date_range` * Other types ** `binary` diff --git a/docs/reference/esql/esql-query-api.asciidoc b/docs/reference/esql/esql-query-api.asciidoc index 63b8738266132..8e07a627567df 100644 --- a/docs/reference/esql/esql-query-api.asciidoc +++ b/docs/reference/esql/esql-query-api.asciidoc @@ -92,8 +92,8 @@ https://en.wikipedia.org/wiki/Query_plan[EXPLAIN PLAN]. ifeval::["{release-state}"=="unreleased"] -`table`:: -(Optional, object) Named "table" parameters that can be referenced by the <> command. +//`table`:: +//(Optional, object) Named "table" parameters that can be referenced by the <> command. endif::[] [discrete] diff --git a/docs/reference/esql/esql-syntax.asciidoc b/docs/reference/esql/esql-syntax.asciidoc index c7f741d064310..ba1c4ca820381 100644 --- a/docs/reference/esql/esql-syntax.asciidoc +++ b/docs/reference/esql/esql-syntax.asciidoc @@ -157,21 +157,15 @@ FROM employees ==== Timespan literals Datetime intervals and timespans can be expressed using timespan literals. -Timespan literals are a combination of a number and a qualifier. These -qualifiers are supported: - -* `millisecond`/`milliseconds`/`ms` -* `second`/`seconds`/`sec`/`s` -* `minute`/`minutes`/`min` -* `hour`/`hours`/`h` -* `day`/`days`/`d` -* `week`/`weeks`/`w` -* `month`/`months`/`mo` -* `quarter`/`quarters`/`q` -* `year`/`years`/`yr`/`y` +Timespan literals are a combination of a number and a temporal unit. The +supported temporal units are listed in <>. +More examples of the usage of time spans can be found in +<>. + Timespan literals are not whitespace sensitive. 
These expressions are all valid: * `1day` * `1 day` * `1    day` + diff --git a/docs/reference/esql/functions/aggregation-functions.asciidoc b/docs/reference/esql/functions/aggregation-functions.asciidoc index 7cdc42ea6cbf9..c2c2508ad5de2 100644 --- a/docs/reference/esql/functions/aggregation-functions.asciidoc +++ b/docs/reference/esql/functions/aggregation-functions.asciidoc @@ -17,10 +17,11 @@ The <> command supports these aggregate functions: * <> * <> * experimental:[] <> +* <> * <> * <> * <> -* experimental:[] <> +* <> // end::agg_list[] include::layout/avg.asciidoc[] @@ -32,6 +33,7 @@ include::layout/median_absolute_deviation.asciidoc[] include::layout/min.asciidoc[] include::layout/percentile.asciidoc[] include::layout/st_centroid_agg.asciidoc[] +include::layout/std_dev.asciidoc[] include::layout/sum.asciidoc[] include::layout/top.asciidoc[] include::layout/values.asciidoc[] diff --git a/docs/reference/esql/functions/binary.asciidoc b/docs/reference/esql/functions/binary.asciidoc index 72d466ae83d11..59bdadecc4923 100644 --- a/docs/reference/esql/functions/binary.asciidoc +++ b/docs/reference/esql/functions/binary.asciidoc @@ -87,6 +87,7 @@ Supported types: include::types/greater_than_or_equal.asciidoc[] +[[esql-add]] ==== Add `+` [.text-center] image::esql/functions/signature/add.svg[Embedded,opts=inline] @@ -98,6 +99,7 @@ Supported types: include::types/add.asciidoc[] +[[esql-subtract]] ==== Subtract `-` [.text-center] image::esql/functions/signature/sub.svg[Embedded,opts=inline] diff --git a/docs/reference/esql/functions/description/kql.asciidoc b/docs/reference/esql/functions/description/kql.asciidoc new file mode 100644 index 0000000000000..e1fe411e6689c --- /dev/null +++ b/docs/reference/esql/functions/description/kql.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Performs a KQL query. Returns true if the provided KQL query string matches the row. diff --git a/docs/reference/esql/functions/description/std_dev.asciidoc b/docs/reference/esql/functions/description/std_dev.asciidoc new file mode 100644 index 0000000000000..b78ddd7dbba13 --- /dev/null +++ b/docs/reference/esql/functions/description/std_dev.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +The standard deviation of a numeric field. diff --git a/docs/reference/esql/functions/examples/count.asciidoc b/docs/reference/esql/functions/examples/count.asciidoc index fb696b51e054c..33ed054d3d1e2 100644 --- a/docs/reference/esql/functions/examples/count.asciidoc +++ b/docs/reference/esql/functions/examples/count.asciidoc @@ -37,7 +37,7 @@ include::{esql-specs}/stats.csv-spec[tag=count-where] |=== include::{esql-specs}/stats.csv-spec[tag=count-where-result] |=== -To count the same stream of data based on two different expressions use the pattern `COUNT(<expression> OR NULL)` +To count the same stream of data based on two different expressions use the pattern `COUNT(<expression> OR NULL)`. This builds on the three-valued logic ({wikipedia}/Three-valued_logic[3VL]) of the language: `TRUE OR NULL` is `TRUE`, but `FALSE OR NULL` is `NULL`, plus the way COUNT handles `NULL`s: `COUNT(TRUE)` and `COUNT(FALSE)` are both 1, but `COUNT(NULL)` is 0. 
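Editorial aside: the `COUNT(<expression> OR NULL)` pattern just described is easiest to see on a single invented row. `n > 0 OR NULL` evaluates to `TRUE` when the condition holds and to `NULL` otherwise, so only matching rows contribute to the count:

[source,esql]
----
// For n = 1: n > 0 is TRUE (counted); n < 0 is FALSE OR NULL, i.e. NULL (not counted)
ROW n = 1
| STATS above_zero = COUNT(n > 0 OR NULL), below_zero = COUNT(n < 0 OR NULL)
----

This returns `above_zero = 1` and `below_zero = 0`, matching the `count-or-null` spec example included just below.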
[source.merge.styled,esql] ---- include::{esql-specs}/stats.csv-spec[tag=count-or-null] diff --git a/docs/reference/esql/functions/examples/kql.asciidoc b/docs/reference/esql/functions/examples/kql.asciidoc new file mode 100644 index 0000000000000..1f8518aeec394 --- /dev/null +++ b/docs/reference/esql/functions/examples/kql.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/kql-function.csv-spec[tag=kql-with-field] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/kql-function.csv-spec[tag=kql-with-field-result] +|=== + diff --git a/docs/reference/esql/functions/examples/std_dev.asciidoc b/docs/reference/esql/functions/examples/std_dev.asciidoc new file mode 100644 index 0000000000000..2e6dc996aae9a --- /dev/null +++ b/docs/reference/esql/functions/examples/std_dev.asciidoc @@ -0,0 +1,22 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Examples* + +[source.merge.styled,esql] +---- +include::{esql-specs}/stats.csv-spec[tag=stdev] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats.csv-spec[tag=stdev-result] +|=== +The expression can use inline functions. For example, to calculate the standard deviation of each employee's maximum salary changes, first use `MV_MAX` on each row, and then use `STD_DEV` on the result +[source.merge.styled,esql] +---- +include::{esql-specs}/stats.csv-spec[tag=docsStatsStdDevNestedExpression] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats.csv-spec[tag=docsStatsStdDevNestedExpression-result] +|=== + diff --git a/docs/reference/esql/functions/kibana/definition/bit_length.json b/docs/reference/esql/functions/kibana/definition/bit_length.json index 156a063984e4d..0c75b76cdbbfb 100644 --- a/docs/reference/esql/functions/kibana/definition/bit_length.json +++ b/docs/reference/esql/functions/kibana/definition/bit_length.json @@ -31,7 +31,7 @@ } ], "examples" : [ - "FROM airports\n| WHERE country == \"India\"\n| KEEP city\n| EVAL fn_length=LENGTH(city), fn_bit_length = BIT_LENGTH(city)" + "FROM airports\n| WHERE country == \"India\"\n| KEEP city\n| EVAL fn_length = LENGTH(city), fn_bit_length = BIT_LENGTH(city)" ], "preview" : false, "snapshot_only" : false diff --git a/docs/reference/esql/functions/kibana/definition/byte_length.json b/docs/reference/esql/functions/kibana/definition/byte_length.json index c8280a572fc62..60f439b9d8133 100644 --- a/docs/reference/esql/functions/kibana/definition/byte_length.json +++ b/docs/reference/esql/functions/kibana/definition/byte_length.json @@ -31,7 +31,7 @@ } ], "examples" : [ - "FROM airports\n| WHERE country == \"India\"\n| KEEP city\n| EVAL fn_length=LENGTH(city), fn_byte_length = BYTE_LENGTH(city)" + "FROM airports\n| WHERE country == \"India\"\n| KEEP city\n| EVAL fn_length = LENGTH(city), fn_byte_length = BYTE_LENGTH(city)" ], "preview" : false, "snapshot_only" : false diff --git a/docs/reference/esql/functions/kibana/definition/case.json b/docs/reference/esql/functions/kibana/definition/case.json index bf498f690551c..51693d9d30660 100644 --- a/docs/reference/esql/functions/kibana/definition/case.json +++ b/docs/reference/esql/functions/kibana/definition/case.json @@ -172,6 +172,48 @@ "variadic" : true, "returnType" : "date" }, + { + "params" : [ + { + "name" : 
"condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "date_nanos", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + } + ], + "variadic" : true, + "returnType" : "date_nanos" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "date_nanos", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "date_nanos", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "date_nanos" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/categorize.json b/docs/reference/esql/functions/kibana/definition/categorize.json index 386b178d3753f..ca3971a6e05a3 100644 --- a/docs/reference/esql/functions/kibana/definition/categorize.json +++ b/docs/reference/esql/functions/kibana/definition/categorize.json @@ -14,7 +14,7 @@ } ], "variadic" : false, - "returnType" : "integer" + "returnType" : "keyword" }, { "params" : [ @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "integer" + "returnType" : "keyword" } ], "preview" : false, diff --git a/docs/reference/esql/functions/kibana/definition/coalesce.json b/docs/reference/esql/functions/kibana/definition/coalesce.json index 7f49195190951..c929323397c9b 100644 --- a/docs/reference/esql/functions/kibana/definition/coalesce.json +++ b/docs/reference/esql/functions/kibana/definition/coalesce.json @@ -88,6 +88,24 @@ "variadic" : true, "returnType" : "date" }, + { + "params" : [ + { + "name" : "first", + "type" : "date_nanos", + "optional" : false, + "description" : "Expression to evaluate." + }, + { + "name" : "rest", + "type" : "date_nanos", + "optional" : true, + "description" : "Other expression to evaluate." 
+ } + ], + "variadic" : true, + "returnType" : "date_nanos" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/count.json b/docs/reference/esql/functions/kibana/definition/count.json index 88d4ba3d3e339..329a18c4d9d01 100644 --- a/docs/reference/esql/functions/kibana/definition/count.json +++ b/docs/reference/esql/functions/kibana/definition/count.json @@ -151,7 +151,7 @@ ], "examples" : [ "FROM employees\n| STATS COUNT(height)", - "FROM employees \n| STATS count = COUNT(*) BY languages \n| SORT languages DESC", + "FROM employees\n| STATS count = COUNT(*) BY languages\n| SORT languages DESC", "ROW words=\"foo;bar;baz;qux;quux;foo\"\n| STATS word_count = COUNT(SPLIT(words, \";\"))", "ROW n=1\n| WHERE n < 0\n| STATS COUNT(n)", "ROW n=1\n| STATS COUNT(n > 0 OR NULL), COUNT(n < 0 OR NULL)" diff --git a/docs/reference/esql/functions/kibana/definition/count_distinct.json b/docs/reference/esql/functions/kibana/definition/count_distinct.json index 3addd08df60df..54b99ee84ce2d 100644 --- a/docs/reference/esql/functions/kibana/definition/count_distinct.json +++ b/docs/reference/esql/functions/kibana/definition/count_distinct.json @@ -136,6 +136,72 @@ "variadic" : false, "returnType" : "long" }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "integer", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Column or literal for which to count the number of distinct values." + }, + { + "name" : "precision", + "type" : "unsigned_long", + "optional" : true, + "description" : "Precision threshold. Refer to <>. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000." 
+ } + ], + "variadic" : false, + "returnType" : "long" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/date_trunc.json b/docs/reference/esql/functions/kibana/definition/date_trunc.json index 871994407233b..cdda984a0ce7e 100644 --- a/docs/reference/esql/functions/kibana/definition/date_trunc.json +++ b/docs/reference/esql/functions/kibana/definition/date_trunc.json @@ -22,6 +22,24 @@ "variadic" : false, "returnType" : "date" }, + { + "params" : [ + { + "name" : "interval", + "type" : "date_period", + "optional" : false, + "description" : "Interval; expressed using the timespan literal syntax." + }, + { + "name" : "date", + "type" : "date_nanos", + "optional" : false, + "description" : "Date expression" + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, { "params" : [ { @@ -39,6 +57,24 @@ ], "variadic" : false, "returnType" : "date" + }, + { + "params" : [ + { + "name" : "interval", + "type" : "time_duration", + "optional" : false, + "description" : "Interval; expressed using the timespan literal syntax." + }, + { + "name" : "date", + "type" : "date_nanos", + "optional" : false, + "description" : "Date expression" + } + ], + "variadic" : false, + "returnType" : "date_nanos" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/equals.json b/docs/reference/esql/functions/kibana/definition/equals.json index 59df59eaccc4e..885d949f4b20f 100644 --- a/docs/reference/esql/functions/kibana/definition/equals.json +++ b/docs/reference/esql/functions/kibana/definition/equals.json @@ -77,6 +77,24 @@ "variadic" : false, "returnType" : "boolean" }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/greater_than.json b/docs/reference/esql/functions/kibana/definition/greater_than.json index 7354112551e2c..cf6e30a0a4547 100644 --- a/docs/reference/esql/functions/kibana/definition/greater_than.json +++ b/docs/reference/esql/functions/kibana/definition/greater_than.json @@ -23,6 +23,24 @@ "variadic" : false, "returnType" : "boolean" }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json b/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json index 832eed417ef4a..2535c68af6acf 100644 --- a/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json +++ b/docs/reference/esql/functions/kibana/definition/greater_than_or_equal.json @@ -23,6 +23,24 @@ "variadic" : false, "returnType" : "boolean" }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/greatest.json b/docs/reference/esql/functions/kibana/definition/greatest.json index eebb4fad1eb1d..077100317dfca 100644 --- a/docs/reference/esql/functions/kibana/definition/greatest.json +++ b/docs/reference/esql/functions/kibana/definition/greatest.json @@ -53,6 +53,24 @@ "variadic" : true, "returnType" : "date" }, + { + "params" : [ + { + "name" : "first", + "type" : "date_nanos", + "optional" : false, + "description" : "First of the columns to evaluate." + }, + { + "name" : "rest", + "type" : "date_nanos", + "optional" : true, + "description" : "The rest of the columns to evaluate." + } + ], + "variadic" : true, + "returnType" : "date_nanos" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/kql.json b/docs/reference/esql/functions/kibana/definition/kql.json new file mode 100644 index 0000000000000..6960681fbbf0d --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/kql.json @@ -0,0 +1,37 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "kql", + "description" : "Performs a KQL query. Returns true if the provided KQL query string matches the row.", + "signatures" : [ + { + "params" : [ + { + "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Query string in KQL query string format." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "query", + "type" : "text", + "optional" : false, + "description" : "Query string in KQL query string format." + } + ], + "variadic" : false, + "returnType" : "boolean" + } + ], + "examples" : [ + "FROM books \n| WHERE KQL(\"author: Faulkner\")\n| KEEP book_no, author \n| SORT book_no \n| LIMIT 5;" + ], + "preview" : true, + "snapshot_only" : true +} diff --git a/docs/reference/esql/functions/kibana/definition/least.json b/docs/reference/esql/functions/kibana/definition/least.json index 02fa58f92eaef..18ec65c60f475 100644 --- a/docs/reference/esql/functions/kibana/definition/least.json +++ b/docs/reference/esql/functions/kibana/definition/least.json @@ -52,6 +52,24 @@ "variadic" : true, "returnType" : "date" }, + { + "params" : [ + { + "name" : "first", + "type" : "date_nanos", + "optional" : false, + "description" : "First of the columns to evaluate." + }, + { + "name" : "rest", + "type" : "date_nanos", + "optional" : true, + "description" : "The rest of the columns to evaluate." 
+ } + ], + "variadic" : true, + "returnType" : "date_nanos" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/length.json b/docs/reference/esql/functions/kibana/definition/length.json index 9ea340ebf7420..bc26acde744f5 100644 --- a/docs/reference/esql/functions/kibana/definition/length.json +++ b/docs/reference/esql/functions/kibana/definition/length.json @@ -31,7 +31,7 @@ } ], "examples" : [ - "FROM airports\n| KEEP city\n| EVAL fn_length = LENGTH(first_name)" + "FROM airports\n| WHERE country == \"India\"\n| KEEP city\n| EVAL fn_length = LENGTH(city)" ], "preview" : false, "snapshot_only" : false diff --git a/docs/reference/esql/functions/kibana/definition/less_than.json b/docs/reference/esql/functions/kibana/definition/less_than.json index 66578d73b8e9c..a73754d200d46 100644 --- a/docs/reference/esql/functions/kibana/definition/less_than.json +++ b/docs/reference/esql/functions/kibana/definition/less_than.json @@ -23,6 +23,24 @@ "variadic" : false, "returnType" : "boolean" }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json b/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json index 5ffd4567cdb07..7af477db32a34 100644 --- a/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json +++ b/docs/reference/esql/functions/kibana/definition/less_than_or_equal.json @@ -23,6 +23,24 @@ "variadic" : false, "returnType" : "boolean" }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/match.json b/docs/reference/esql/functions/kibana/definition/match.json index 8a355360a790f..4a5b05a3f501b 100644 --- a/docs/reference/esql/functions/kibana/definition/match.json +++ b/docs/reference/esql/functions/kibana/definition/match.json @@ -78,7 +78,7 @@ } ], "examples" : [ - "from books \n| where match(author, \"Faulkner\")\n| keep book_no, author \n| sort book_no \n| limit 5;" + "FROM books \n| WHERE MATCH(author, \"Faulkner\")\n| KEEP book_no, author \n| SORT book_no \n| LIMIT 5;" ], "preview" : true, "snapshot_only" : false diff --git a/docs/reference/esql/functions/kibana/definition/match_operator.json b/docs/reference/esql/functions/kibana/definition/match_operator.json new file mode 100644 index 0000000000000..7a0ace6168b59 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/match_operator.json @@ -0,0 +1,85 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "operator", + "name" : "match_operator", + "description" : "Performs a match query on the specified field. Returns true if the provided query matches the row.", + "signatures" : [ + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "Field that the query will target." 
+ }, + { + "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Text you wish to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "text", + "optional" : false, + "description" : "Text you wish to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "keyword", + "optional" : false, + "description" : "Text you wish to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "Field that the query will target." + }, + { + "name" : "query", + "type" : "text", + "optional" : false, + "description" : "Text you wish to find in the provided field." + } + ], + "variadic" : false, + "returnType" : "boolean" + } + ], + "examples" : [ + "FROM books \n| WHERE MATCH(author, \"Faulkner\")\n| KEEP book_no, author \n| SORT book_no \n| LIMIT 5;" + ], + "preview" : true, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/max.json b/docs/reference/esql/functions/kibana/definition/max.json index 45fd26571b091..7f3d2215ee099 100644 --- a/docs/reference/esql/functions/kibana/definition/max.json +++ b/docs/reference/esql/functions/kibana/definition/max.json @@ -28,6 +28,18 @@ "variadic" : false, "returnType" : "date" }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/min.json b/docs/reference/esql/functions/kibana/definition/min.json index ae71fba049dbe..74e3fd8208f1b 100644 --- a/docs/reference/esql/functions/kibana/definition/min.json +++ b/docs/reference/esql/functions/kibana/definition/min.json @@ -28,6 +28,18 @@ "variadic" : false, "returnType" : "date" }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/mv_count.json b/docs/reference/esql/functions/kibana/definition/mv_count.json index 4767b35ec7cac..90ace2525f710 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_count.json +++ b/docs/reference/esql/functions/kibana/definition/mv_count.json @@ -52,6 +52,18 @@ "variadic" : false, "returnType" : "integer" }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Multivalue expression." 
+ } + ], + "variadic" : false, + "returnType" : "integer" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/mv_dedupe.json b/docs/reference/esql/functions/kibana/definition/mv_dedupe.json index bfca58bc3e140..ce2c96dbc1757 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_dedupe.json +++ b/docs/reference/esql/functions/kibana/definition/mv_dedupe.json @@ -53,6 +53,18 @@ "variadic" : false, "returnType" : "date" }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Multivalue expression." + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/mv_first.json b/docs/reference/esql/functions/kibana/definition/mv_first.json index a2b6358023e4b..552f568c9b171 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_first.json +++ b/docs/reference/esql/functions/kibana/definition/mv_first.json @@ -52,6 +52,18 @@ "variadic" : false, "returnType" : "date" }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Multivalue expression." + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/mv_last.json b/docs/reference/esql/functions/kibana/definition/mv_last.json index b6dc268af5305..78d7b348a6042 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_last.json +++ b/docs/reference/esql/functions/kibana/definition/mv_last.json @@ -52,6 +52,18 @@ "variadic" : false, "returnType" : "date" }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Multivalue expression." + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/mv_max.json b/docs/reference/esql/functions/kibana/definition/mv_max.json index 27d2b010dc02c..a1e55c58cff70 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_max.json +++ b/docs/reference/esql/functions/kibana/definition/mv_max.json @@ -28,6 +28,18 @@ "variadic" : false, "returnType" : "date" }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Multivalue expression." + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/mv_min.json b/docs/reference/esql/functions/kibana/definition/mv_min.json index 410e97335687f..7998ca4eda94e 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_min.json +++ b/docs/reference/esql/functions/kibana/definition/mv_min.json @@ -28,6 +28,18 @@ "variadic" : false, "returnType" : "date" }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Multivalue expression." 
+ } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/mv_slice.json b/docs/reference/esql/functions/kibana/definition/mv_slice.json index dbbfe0ffb5a78..df4d48145fac6 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_slice.json +++ b/docs/reference/esql/functions/kibana/definition/mv_slice.json @@ -100,6 +100,30 @@ "variadic" : false, "returnType" : "date" }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Multivalue expression. If `null`, the function returns `null`." + }, + { + "name" : "start", + "type" : "integer", + "optional" : false, + "description" : "Start position. If `null`, the function returns `null`. The start argument can be negative. An index of -1 is used to specify the last value in the list." + }, + { + "name" : "end", + "type" : "integer", + "optional" : true, + "description" : "End position(included). Optional; if omitted, the position at `start` is returned. The end argument can be negative. An index of -1 is used to specify the last value in the list." + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/mv_sort.json b/docs/reference/esql/functions/kibana/definition/mv_sort.json index 4cb255fb0afcb..072c05743af33 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_sort.json +++ b/docs/reference/esql/functions/kibana/definition/mv_sort.json @@ -40,6 +40,24 @@ "variadic" : false, "returnType" : "date" }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Multivalue expression. If `null`, the function returns `null`." + }, + { + "name" : "order", + "type" : "keyword", + "optional" : true, + "description" : "Sort order. The valid options are ASC and DESC, the default is ASC." + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/not_equals.json b/docs/reference/esql/functions/kibana/definition/not_equals.json index 69389d4c8d077..24f31115cbc37 100644 --- a/docs/reference/esql/functions/kibana/definition/not_equals.json +++ b/docs/reference/esql/functions/kibana/definition/not_equals.json @@ -77,6 +77,24 @@ "variadic" : false, "returnType" : "boolean" }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." + }, + { + "name" : "rhs", + "type" : "date_nanos", + "optional" : false, + "description" : "An expression." 
+ } + ], + "variadic" : false, + "returnType" : "boolean" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/qstr.json b/docs/reference/esql/functions/kibana/definition/qstr.json index 9823c3cff8923..76473349a3414 100644 --- a/docs/reference/esql/functions/kibana/definition/qstr.json +++ b/docs/reference/esql/functions/kibana/definition/qstr.json @@ -30,7 +30,7 @@ } ], "examples" : [ - "from books \n| where qstr(\"author: Faulkner\")\n| keep book_no, author \n| sort book_no \n| limit 5;" + "FROM books \n| WHERE QSTR(\"author: Faulkner\")\n| KEEP book_no, author \n| SORT book_no \n| LIMIT 5;" ], "preview" : true, "snapshot_only" : false diff --git a/docs/reference/esql/functions/kibana/definition/std_dev.json b/docs/reference/esql/functions/kibana/definition/std_dev.json new file mode 100644 index 0000000000000..f31d3345421d9 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/std_dev.json @@ -0,0 +1,50 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "agg", + "name" : "std_dev", + "description" : "The standard deviation of a numeric field.", + "signatures" : [ + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "long", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "double" + } + ], + "examples" : [ + "FROM employees\n| STATS STD_DEV(height)", + "FROM employees\n| STATS stddev_salary_change = STD_DEV(MV_MAX(salary_change))" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/to_date_nanos.json b/docs/reference/esql/functions/kibana/definition/to_date_nanos.json index 07ffe84444f02..d9409bceb8e6f 100644 --- a/docs/reference/esql/functions/kibana/definition/to_date_nanos.json +++ b/docs/reference/esql/functions/kibana/definition/to_date_nanos.json @@ -4,7 +4,92 @@ "name" : "to_date_nanos", "description" : "Converts an input to a nanosecond-resolution date value (aka date_nanos).", "note" : "The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.", - "signatures" : [ ], + "signatures" : [ + { + "params" : [ + { + "name" : "field", + "type" : "date", + "optional" : false, + "description" : "Input value. The input can be a single- or multi-valued column or an expression." + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Input value. The input can be a single- or multi-valued column or an expression." + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, + { + "params" : [ + { + "name" : "field", + "type" : "double", + "optional" : false, + "description" : "Input value. The input can be a single- or multi-valued column or an expression." 
+ } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "Input value. The input can be a single- or multi-valued column or an expression." + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, + { + "params" : [ + { + "name" : "field", + "type" : "long", + "optional" : false, + "description" : "Input value. The input can be a single- or multi-valued column or an expression." + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "Input value. The input can be a single- or multi-valued column or an expression." + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, + { + "params" : [ + { + "name" : "field", + "type" : "unsigned_long", + "optional" : false, + "description" : "Input value. The input can be a single- or multi-valued column or an expression." + } + ], + "variadic" : false, + "returnType" : "date_nanos" + } + ], "preview" : true, "snapshot_only" : false } diff --git a/docs/reference/esql/functions/kibana/definition/to_datetime.json b/docs/reference/esql/functions/kibana/definition/to_datetime.json index 072aa66aac669..8f9ecbd139d32 100644 --- a/docs/reference/esql/functions/kibana/definition/to_datetime.json +++ b/docs/reference/esql/functions/kibana/definition/to_datetime.json @@ -17,6 +17,18 @@ "variadic" : false, "returnType" : "date" }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Input value. The input can be a single- or multi-valued column or an expression." + } + ], + "variadic" : false, + "returnType" : "date" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/to_long.json b/docs/reference/esql/functions/kibana/definition/to_long.json index afd6de001bbc6..eb1ce7220c3f9 100644 --- a/docs/reference/esql/functions/kibana/definition/to_long.json +++ b/docs/reference/esql/functions/kibana/definition/to_long.json @@ -52,6 +52,18 @@ "variadic" : false, "returnType" : "long" }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Input value. The input can be a single- or multi-valued column or an expression." + } + ], + "variadic" : false, + "returnType" : "long" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/to_string.json b/docs/reference/esql/functions/kibana/definition/to_string.json index 33e95d5bed1c2..1c86e81b31136 100644 --- a/docs/reference/esql/functions/kibana/definition/to_string.json +++ b/docs/reference/esql/functions/kibana/definition/to_string.json @@ -52,6 +52,18 @@ "variadic" : false, "returnType" : "keyword" }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "Input value. The input can be a single- or multi-valued column or an expression." 
+ } + ], + "variadic" : false, + "returnType" : "keyword" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/values.json b/docs/reference/esql/functions/kibana/definition/values.json index ae69febd4f755..95ac402bb242a 100644 --- a/docs/reference/esql/functions/kibana/definition/values.json +++ b/docs/reference/esql/functions/kibana/definition/values.json @@ -28,6 +28,18 @@ "variadic" : false, "returnType" : "date" }, + { + "params" : [ + { + "name" : "field", + "type" : "date_nanos", + "optional" : false, + "description" : "" + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/docs/bit_length.md b/docs/reference/esql/functions/kibana/docs/bit_length.md index 253b2cdb6a7c6..b1d8e24c4de76 100644 --- a/docs/reference/esql/functions/kibana/docs/bit_length.md +++ b/docs/reference/esql/functions/kibana/docs/bit_length.md @@ -9,6 +9,6 @@ Returns the bit length of a string. FROM airports | WHERE country == "India" | KEEP city -| EVAL fn_length=LENGTH(city), fn_bit_length = BIT_LENGTH(city) +| EVAL fn_length = LENGTH(city), fn_bit_length = BIT_LENGTH(city) ``` Note: All strings are in UTF-8, so a single character can use multiple bytes. diff --git a/docs/reference/esql/functions/kibana/docs/byte_length.md b/docs/reference/esql/functions/kibana/docs/byte_length.md index 20d96ce38400d..9cd4f87c9883b 100644 --- a/docs/reference/esql/functions/kibana/docs/byte_length.md +++ b/docs/reference/esql/functions/kibana/docs/byte_length.md @@ -9,6 +9,6 @@ Returns the byte length of a string. FROM airports | WHERE country == "India" | KEEP city -| EVAL fn_length=LENGTH(city), fn_byte_length = BYTE_LENGTH(city) +| EVAL fn_length = LENGTH(city), fn_byte_length = BYTE_LENGTH(city) ``` Note: All strings are in UTF-8, so a single character can use multiple bytes. diff --git a/docs/reference/esql/functions/kibana/docs/kql.md b/docs/reference/esql/functions/kibana/docs/kql.md new file mode 100644 index 0000000000000..0ba419c1cd032 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/kql.md @@ -0,0 +1,14 @@ + + +### KQL +Performs a KQL query. Returns true if the provided KQL query string matches the row. + +``` +FROM books +| WHERE KQL("author: Faulkner") +| KEEP book_no, author +| SORT book_no +| LIMIT 5; +``` diff --git a/docs/reference/esql/functions/kibana/docs/length.md b/docs/reference/esql/functions/kibana/docs/length.md index ce7726d092bae..aed76ee14cedb 100644 --- a/docs/reference/esql/functions/kibana/docs/length.md +++ b/docs/reference/esql/functions/kibana/docs/length.md @@ -7,7 +7,8 @@ Returns the character length of a string. ``` FROM airports +| WHERE country == "India" | KEEP city -| EVAL fn_length = LENGTH(first_name) +| EVAL fn_length = LENGTH(city) ``` Note: All strings are in UTF-8, so a single character can use multiple bytes. diff --git a/docs/reference/esql/functions/kibana/docs/match.md b/docs/reference/esql/functions/kibana/docs/match.md index 3c06662982bbf..b866637b41b85 100644 --- a/docs/reference/esql/functions/kibana/docs/match.md +++ b/docs/reference/esql/functions/kibana/docs/match.md @@ -6,9 +6,9 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ Performs a match query on the specified field. Returns true if the provided query matches the row. 
``` -from books -| where match(author, "Faulkner") -| keep book_no, author -| sort book_no -| limit 5; +FROM books +| WHERE MATCH(author, "Faulkner") +| KEEP book_no, author +| SORT book_no +| LIMIT 5; ``` diff --git a/docs/reference/esql/functions/kibana/docs/match_operator.md b/docs/reference/esql/functions/kibana/docs/match_operator.md new file mode 100644 index 0000000000000..fda8b24ff76cc --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/match_operator.md @@ -0,0 +1,14 @@ + + +### MATCH_OPERATOR +Performs a match query on the specified field. Returns true if the provided query matches the row. + +``` +FROM books +| WHERE MATCH(author, "Faulkner") +| KEEP book_no, author +| SORT book_no +| LIMIT 5; +``` diff --git a/docs/reference/esql/functions/kibana/docs/qstr.md b/docs/reference/esql/functions/kibana/docs/qstr.md index 37b5777623185..9b5dc3f9a22eb 100644 --- a/docs/reference/esql/functions/kibana/docs/qstr.md +++ b/docs/reference/esql/functions/kibana/docs/qstr.md @@ -6,9 +6,9 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ Performs a query string query. Returns true if the provided query string matches the row. ``` -from books -| where qstr("author: Faulkner") -| keep book_no, author -| sort book_no -| limit 5; +FROM books +| WHERE QSTR("author: Faulkner") +| KEEP book_no, author +| SORT book_no +| LIMIT 5; ``` diff --git a/docs/reference/esql/functions/kibana/docs/std_dev.md b/docs/reference/esql/functions/kibana/docs/std_dev.md new file mode 100644 index 0000000000000..a6afca7b8f6b3 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/std_dev.md @@ -0,0 +1,11 @@ + + +### STD_DEV +The standard deviation of a numeric field. + +``` +FROM employees +| STATS STD_DEV(height) +``` diff --git a/docs/reference/esql/functions/layout/kql.asciidoc b/docs/reference/esql/functions/layout/kql.asciidoc new file mode 100644 index 0000000000000..8cf2687b240c1 --- /dev/null +++ b/docs/reference/esql/functions/layout/kql.asciidoc @@ -0,0 +1,17 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-kql]] +=== `KQL` + +preview::["Do not use on production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] + +*Syntax* + +[.text-center] +image::esql/functions/signature/kql.svg[Embedded,opts=inline] + +include::../parameters/kql.asciidoc[] +include::../description/kql.asciidoc[] +include::../types/kql.asciidoc[] +include::../examples/kql.asciidoc[] diff --git a/docs/reference/esql/functions/layout/std_dev.asciidoc b/docs/reference/esql/functions/layout/std_dev.asciidoc new file mode 100644 index 0000000000000..a7a34b1331d17 --- /dev/null +++ b/docs/reference/esql/functions/layout/std_dev.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +[discrete] +[[esql-std_dev]] +=== `STD_DEV` + +*Syntax* + +[.text-center] +image::esql/functions/signature/std_dev.svg[Embedded,opts=inline] + +include::../parameters/std_dev.asciidoc[] +include::../description/std_dev.asciidoc[] +include::../types/std_dev.asciidoc[] +include::../examples/std_dev.asciidoc[] diff --git a/docs/reference/esql/functions/mv-functions.asciidoc b/docs/reference/esql/functions/mv-functions.asciidoc index 4093e44c16911..3da0249c9c0db 100644 --- a/docs/reference/esql/functions/mv-functions.asciidoc +++ b/docs/reference/esql/functions/mv-functions.asciidoc @@ -19,6 +19,7 @@ * <> * <> * <> +* <> * <> * <> * <> @@ -37,6 +38,7 @@ include::layout/mv_max.asciidoc[] include::layout/mv_median.asciidoc[] include::layout/mv_median_absolute_deviation.asciidoc[] include::layout/mv_min.asciidoc[] +include::layout/mv_percentile.asciidoc[] include::layout/mv_pseries_weighted_sum.asciidoc[] include::layout/mv_slice.asciidoc[] include::layout/mv_sort.asciidoc[] diff --git a/docs/reference/esql/functions/operators.asciidoc b/docs/reference/esql/functions/operators.asciidoc index ee344a52687c2..a1a2226335e9b 100644 --- a/docs/reference/esql/functions/operators.asciidoc +++ b/docs/reference/esql/functions/operators.asciidoc @@ -16,6 +16,7 @@ Boolean operators for comparing against one or multiple expressions. * <> * <> * <> +* experimental:[] <> // end::op_list[] include::binary.asciidoc[] @@ -26,3 +27,4 @@ include::cast.asciidoc[] include::in.asciidoc[] include::like.asciidoc[] include::rlike.asciidoc[] +include::search.asciidoc[] diff --git a/docs/reference/esql/functions/parameters/kql.asciidoc b/docs/reference/esql/functions/parameters/kql.asciidoc new file mode 100644 index 0000000000000..6fb69323ff73c --- /dev/null +++ b/docs/reference/esql/functions/parameters/kql.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`query`:: +Query string in KQL query string format. diff --git a/docs/reference/esql/functions/parameters/std_dev.asciidoc b/docs/reference/esql/functions/parameters/std_dev.asciidoc new file mode 100644 index 0000000000000..91c56709d182a --- /dev/null +++ b/docs/reference/esql/functions/parameters/std_dev.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`number`:: + diff --git a/docs/reference/esql/functions/search.asciidoc b/docs/reference/esql/functions/search.asciidoc new file mode 100644 index 0000000000000..ae1b003b65abb --- /dev/null +++ b/docs/reference/esql/functions/search.asciidoc @@ -0,0 +1,23 @@ +[discrete] +[[esql-search-operators]] +=== Search operators + +The only search operator is match (`:`). + +preview::["Do not use on production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] + +The match operator performs a <> on the specified field. Returns true if the provided query matches the row. 
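For reference, a minimal sketch of the operator in a query (a hedged illustration: the `books` index and `author` field are borrowed from the `MATCH` function examples elsewhere in these docs, and the `field:query` shape comes from the operator's signature below):

[source,esql]
----
FROM books
| WHERE author:"Faulkner"
| KEEP book_no, author
| SORT book_no
| LIMIT 5
----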
+ +[.text-center] +image::esql/functions/signature/match_operator.svg[Embedded,opts=inline] + +include::types/match.asciidoc[] + +[source.merge.styled,esql] +---- +include::{esql-specs}/match-operator.csv-spec[tag=match-with-field] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/match-operator.csv-spec[tag=match-with-field-result] +|=== diff --git a/docs/reference/esql/functions/signature/kql.svg b/docs/reference/esql/functions/signature/kql.svg new file mode 100644 index 0000000000000..3f550f27ccdff --- /dev/null +++ b/docs/reference/esql/functions/signature/kql.svg @@ -0,0 +1 @@ +KQL(query) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/match_operator.svg b/docs/reference/esql/functions/signature/match_operator.svg new file mode 100644 index 0000000000000..70cea841622eb --- /dev/null +++ b/docs/reference/esql/functions/signature/match_operator.svg @@ -0,0 +1 @@ +field:query \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/std_dev.svg b/docs/reference/esql/functions/signature/std_dev.svg new file mode 100644 index 0000000000000..606d285154f59 --- /dev/null +++ b/docs/reference/esql/functions/signature/std_dev.svg @@ -0,0 +1 @@ +STD_DEV(number) \ No newline at end of file diff --git a/docs/reference/esql/functions/spatial-functions.asciidoc b/docs/reference/esql/functions/spatial-functions.asciidoc index 79acc2028d983..eee44d337b4c6 100644 --- a/docs/reference/esql/functions/spatial-functions.asciidoc +++ b/docs/reference/esql/functions/spatial-functions.asciidoc @@ -8,19 +8,19 @@ {esql} supports these spatial functions: // tag::spatial_list[] -* experimental:[] <> -* experimental:[] <> -* experimental:[] <> -* experimental:[] <> -* experimental:[] <> -* experimental:[] <> -* experimental:[] <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> // end::spatial_list[] +include::layout/st_distance.asciidoc[] include::layout/st_intersects.asciidoc[] include::layout/st_disjoint.asciidoc[] include::layout/st_contains.asciidoc[] include::layout/st_within.asciidoc[] include::layout/st_x.asciidoc[] include::layout/st_y.asciidoc[] -include::layout/st_distance.asciidoc[] diff --git a/docs/reference/esql/functions/types/case.asciidoc b/docs/reference/esql/functions/types/case.asciidoc index c6fb6a091e9d0..9e6915a37fc14 100644 --- a/docs/reference/esql/functions/types/case.asciidoc +++ b/docs/reference/esql/functions/types/case.asciidoc @@ -13,6 +13,8 @@ boolean | cartesian_shape | cartesian_shape | cartesian_shape boolean | cartesian_shape | | cartesian_shape boolean | date | date | date boolean | date | | date +boolean | date_nanos | date_nanos | date_nanos +boolean | date_nanos | | date_nanos boolean | double | double | double boolean | double | | double boolean | geo_point | geo_point | geo_point diff --git a/docs/reference/esql/functions/types/categorize.asciidoc b/docs/reference/esql/functions/types/categorize.asciidoc index 4917ed313e6d7..5b64971cbc482 100644 --- a/docs/reference/esql/functions/types/categorize.asciidoc +++ b/docs/reference/esql/functions/types/categorize.asciidoc @@ -5,6 +5,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== field | result -keyword | integer -text | integer +keyword | keyword +text | keyword |=== diff --git a/docs/reference/esql/functions/types/coalesce.asciidoc b/docs/reference/esql/functions/types/coalesce.asciidoc index 23a249494e0a2..b6479dc7ff86a 100644 --- a/docs/reference/esql/functions/types/coalesce.asciidoc +++ 
b/docs/reference/esql/functions/types/coalesce.asciidoc @@ -10,6 +10,7 @@ boolean | | boolean cartesian_point | cartesian_point | cartesian_point cartesian_shape | cartesian_shape | cartesian_shape date | date | date +date_nanos | date_nanos | date_nanos geo_point | geo_point | geo_point geo_shape | geo_shape | geo_shape integer | integer | integer diff --git a/docs/reference/esql/functions/types/count_distinct.asciidoc b/docs/reference/esql/functions/types/count_distinct.asciidoc index c365c8814573c..f5758a8914d20 100644 --- a/docs/reference/esql/functions/types/count_distinct.asciidoc +++ b/docs/reference/esql/functions/types/count_distinct.asciidoc @@ -13,6 +13,10 @@ date | integer | long date | long | long date | unsigned_long | long date | | long +date_nanos | integer | long +date_nanos | long | long +date_nanos | unsigned_long | long +date_nanos | | long double | integer | long double | long | long double | unsigned_long | long diff --git a/docs/reference/esql/functions/types/date_trunc.asciidoc b/docs/reference/esql/functions/types/date_trunc.asciidoc index aa7dee99c6c44..c610f9119e51c 100644 --- a/docs/reference/esql/functions/types/date_trunc.asciidoc +++ b/docs/reference/esql/functions/types/date_trunc.asciidoc @@ -6,5 +6,7 @@ |=== interval | date | result date_period | date | date +date_period | date_nanos | date_nanos time_duration | date | date +time_duration | date_nanos | date_nanos |=== diff --git a/docs/reference/esql/functions/types/equals.asciidoc b/docs/reference/esql/functions/types/equals.asciidoc index ad0e46ef4b8da..8d48b7ebf084a 100644 --- a/docs/reference/esql/functions/types/equals.asciidoc +++ b/docs/reference/esql/functions/types/equals.asciidoc @@ -9,6 +9,7 @@ boolean | boolean | boolean cartesian_point | cartesian_point | boolean cartesian_shape | cartesian_shape | boolean date | date | boolean +date_nanos | date_nanos | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/greater_than.asciidoc b/docs/reference/esql/functions/types/greater_than.asciidoc index c506328126a94..8000fd34c8507 100644 --- a/docs/reference/esql/functions/types/greater_than.asciidoc +++ b/docs/reference/esql/functions/types/greater_than.asciidoc @@ -6,6 +6,7 @@ |=== lhs | rhs | result date | date | boolean +date_nanos | date_nanos | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc b/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc index c506328126a94..8000fd34c8507 100644 --- a/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc +++ b/docs/reference/esql/functions/types/greater_than_or_equal.asciidoc @@ -6,6 +6,7 @@ |=== lhs | rhs | result date | date | boolean +date_nanos | date_nanos | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/greatest.asciidoc b/docs/reference/esql/functions/types/greatest.asciidoc index 7df77a6991315..0bc11b569d426 100644 --- a/docs/reference/esql/functions/types/greatest.asciidoc +++ b/docs/reference/esql/functions/types/greatest.asciidoc @@ -8,6 +8,7 @@ first | rest | result boolean | boolean | boolean boolean | | boolean date | date | date +date_nanos | date_nanos | date_nanos double | double | double integer | integer | integer integer | | integer diff --git a/docs/reference/esql/functions/types/kql.asciidoc 
b/docs/reference/esql/functions/types/kql.asciidoc new file mode 100644 index 0000000000000..866a39e925665 --- /dev/null +++ b/docs/reference/esql/functions/types/kql.asciidoc @@ -0,0 +1,10 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +query | result +keyword | boolean +text | boolean +|=== diff --git a/docs/reference/esql/functions/types/least.asciidoc b/docs/reference/esql/functions/types/least.asciidoc index 7df77a6991315..0bc11b569d426 100644 --- a/docs/reference/esql/functions/types/least.asciidoc +++ b/docs/reference/esql/functions/types/least.asciidoc @@ -8,6 +8,7 @@ first | rest | result boolean | boolean | boolean boolean | | boolean date | date | date +date_nanos | date_nanos | date_nanos double | double | double integer | integer | integer integer | | integer diff --git a/docs/reference/esql/functions/types/less_than.asciidoc b/docs/reference/esql/functions/types/less_than.asciidoc index c506328126a94..8000fd34c8507 100644 --- a/docs/reference/esql/functions/types/less_than.asciidoc +++ b/docs/reference/esql/functions/types/less_than.asciidoc @@ -6,6 +6,7 @@ |=== lhs | rhs | result date | date | boolean +date_nanos | date_nanos | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/less_than_or_equal.asciidoc b/docs/reference/esql/functions/types/less_than_or_equal.asciidoc index c506328126a94..8000fd34c8507 100644 --- a/docs/reference/esql/functions/types/less_than_or_equal.asciidoc +++ b/docs/reference/esql/functions/types/less_than_or_equal.asciidoc @@ -6,6 +6,7 @@ |=== lhs | rhs | result date | date | boolean +date_nanos | date_nanos | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/match_operator.asciidoc b/docs/reference/esql/functions/types/match_operator.asciidoc new file mode 100644 index 0000000000000..7523b29c62b1d --- /dev/null +++ b/docs/reference/esql/functions/types/match_operator.asciidoc @@ -0,0 +1,12 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +field | query | result +keyword | keyword | boolean +keyword | text | boolean +text | keyword | boolean +text | text | boolean +|=== diff --git a/docs/reference/esql/functions/types/max.asciidoc b/docs/reference/esql/functions/types/max.asciidoc index 564fb8dc3bfb0..adf457dac31b8 100644 --- a/docs/reference/esql/functions/types/max.asciidoc +++ b/docs/reference/esql/functions/types/max.asciidoc @@ -7,6 +7,7 @@ field | result boolean | boolean date | date +date_nanos | date_nanos double | double integer | integer ip | ip diff --git a/docs/reference/esql/functions/types/min.asciidoc b/docs/reference/esql/functions/types/min.asciidoc index 564fb8dc3bfb0..adf457dac31b8 100644 --- a/docs/reference/esql/functions/types/min.asciidoc +++ b/docs/reference/esql/functions/types/min.asciidoc @@ -7,6 +7,7 @@ field | result boolean | boolean date | date +date_nanos | date_nanos double | double integer | integer ip | ip diff --git a/docs/reference/esql/functions/types/mv_count.asciidoc b/docs/reference/esql/functions/types/mv_count.asciidoc index 260c531731f04..c58c4eda44396 100644 --- a/docs/reference/esql/functions/types/mv_count.asciidoc +++ b/docs/reference/esql/functions/types/mv_count.asciidoc @@ -9,6 +9,7 @@ boolean | integer cartesian_point | integer cartesian_shape | integer date | integer +date_nanos | integer double | integer geo_point | integer geo_shape | integer diff --git a/docs/reference/esql/functions/types/mv_dedupe.asciidoc b/docs/reference/esql/functions/types/mv_dedupe.asciidoc index 976de79bb0910..1524ec86cd5ec 100644 --- a/docs/reference/esql/functions/types/mv_dedupe.asciidoc +++ b/docs/reference/esql/functions/types/mv_dedupe.asciidoc @@ -9,6 +9,7 @@ boolean | boolean cartesian_point | cartesian_point cartesian_shape | cartesian_shape date | date +date_nanos | date_nanos double | double geo_point | geo_point geo_shape | geo_shape diff --git a/docs/reference/esql/functions/types/mv_first.asciidoc b/docs/reference/esql/functions/types/mv_first.asciidoc index 47736e76d1db4..e68af2f992b43 100644 --- a/docs/reference/esql/functions/types/mv_first.asciidoc +++ b/docs/reference/esql/functions/types/mv_first.asciidoc @@ -9,6 +9,7 @@ boolean | boolean cartesian_point | cartesian_point cartesian_shape | cartesian_shape date | date +date_nanos | date_nanos double | double geo_point | geo_point geo_shape | geo_shape diff --git a/docs/reference/esql/functions/types/mv_last.asciidoc b/docs/reference/esql/functions/types/mv_last.asciidoc index 47736e76d1db4..e68af2f992b43 100644 --- a/docs/reference/esql/functions/types/mv_last.asciidoc +++ b/docs/reference/esql/functions/types/mv_last.asciidoc @@ -9,6 +9,7 @@ boolean | boolean cartesian_point | cartesian_point cartesian_shape | cartesian_shape date | date +date_nanos | date_nanos double | double geo_point | geo_point geo_shape | geo_shape diff --git a/docs/reference/esql/functions/types/mv_max.asciidoc b/docs/reference/esql/functions/types/mv_max.asciidoc index d4e014554c86c..ffba14489b97c 100644 --- a/docs/reference/esql/functions/types/mv_max.asciidoc +++ b/docs/reference/esql/functions/types/mv_max.asciidoc @@ -7,6 +7,7 @@ field | result boolean | boolean date | date +date_nanos | date_nanos double | double integer | integer ip | ip diff --git a/docs/reference/esql/functions/types/mv_min.asciidoc b/docs/reference/esql/functions/types/mv_min.asciidoc index d4e014554c86c..ffba14489b97c 100644 --- a/docs/reference/esql/functions/types/mv_min.asciidoc +++ 
b/docs/reference/esql/functions/types/mv_min.asciidoc @@ -7,6 +7,7 @@ field | result boolean | boolean date | date +date_nanos | date_nanos double | double integer | integer ip | ip diff --git a/docs/reference/esql/functions/types/mv_slice.asciidoc b/docs/reference/esql/functions/types/mv_slice.asciidoc index 60c1f6315a599..75f45e333ee0c 100644 --- a/docs/reference/esql/functions/types/mv_slice.asciidoc +++ b/docs/reference/esql/functions/types/mv_slice.asciidoc @@ -9,6 +9,7 @@ boolean | integer | integer | boolean cartesian_point | integer | integer | cartesian_point cartesian_shape | integer | integer | cartesian_shape date | integer | integer | date +date_nanos | integer | integer | date_nanos double | integer | integer | double geo_point | integer | integer | geo_point geo_shape | integer | integer | geo_shape diff --git a/docs/reference/esql/functions/types/mv_sort.asciidoc b/docs/reference/esql/functions/types/mv_sort.asciidoc index c21ea5983945e..83d3e45c7be02 100644 --- a/docs/reference/esql/functions/types/mv_sort.asciidoc +++ b/docs/reference/esql/functions/types/mv_sort.asciidoc @@ -7,6 +7,7 @@ field | order | result boolean | keyword | boolean date | keyword | date +date_nanos | keyword | date_nanos double | keyword | double integer | keyword | integer ip | keyword | ip diff --git a/docs/reference/esql/functions/types/not_equals.asciidoc b/docs/reference/esql/functions/types/not_equals.asciidoc index ad0e46ef4b8da..8d48b7ebf084a 100644 --- a/docs/reference/esql/functions/types/not_equals.asciidoc +++ b/docs/reference/esql/functions/types/not_equals.asciidoc @@ -9,6 +9,7 @@ boolean | boolean | boolean cartesian_point | cartesian_point | boolean cartesian_shape | cartesian_shape | boolean date | date | boolean +date_nanos | date_nanos | boolean double | double | boolean double | integer | boolean double | long | boolean diff --git a/docs/reference/esql/functions/types/std_dev.asciidoc b/docs/reference/esql/functions/types/std_dev.asciidoc new file mode 100644 index 0000000000000..273dae4af76c2 --- /dev/null +++ b/docs/reference/esql/functions/types/std_dev.asciidoc @@ -0,0 +1,11 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +number | result +double | double +integer | double +long | double +|=== diff --git a/docs/reference/esql/functions/types/to_date_nanos.asciidoc b/docs/reference/esql/functions/types/to_date_nanos.asciidoc index 1f50b65f25a77..dec6833d14b08 100644 --- a/docs/reference/esql/functions/types/to_date_nanos.asciidoc +++ b/docs/reference/esql/functions/types/to_date_nanos.asciidoc @@ -5,5 +5,11 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== field | result -date_nanos +date | date_nanos +date_nanos | date_nanos +double | date_nanos +keyword | date_nanos +long | date_nanos +text | date_nanos +unsigned_long | date_nanos |=== diff --git a/docs/reference/esql/functions/types/to_datetime.asciidoc b/docs/reference/esql/functions/types/to_datetime.asciidoc index 80c986efca794..118ed6c09c11c 100644 --- a/docs/reference/esql/functions/types/to_datetime.asciidoc +++ b/docs/reference/esql/functions/types/to_datetime.asciidoc @@ -6,6 +6,7 @@ |=== field | result date | date +date_nanos | date double | date integer | date keyword | date diff --git a/docs/reference/esql/functions/types/to_long.asciidoc b/docs/reference/esql/functions/types/to_long.asciidoc index a07990cb1cfbf..1009543c1bbde 100644 --- a/docs/reference/esql/functions/types/to_long.asciidoc +++ b/docs/reference/esql/functions/types/to_long.asciidoc @@ -9,6 +9,7 @@ boolean | long counter_integer | long counter_long | long date | long +date_nanos | long double | long integer | long keyword | long diff --git a/docs/reference/esql/functions/types/to_string.asciidoc b/docs/reference/esql/functions/types/to_string.asciidoc index 26a5b31a2a589..9d4188214b3d8 100644 --- a/docs/reference/esql/functions/types/to_string.asciidoc +++ b/docs/reference/esql/functions/types/to_string.asciidoc @@ -9,6 +9,7 @@ boolean | keyword cartesian_point | keyword cartesian_shape | keyword date | keyword +date_nanos | keyword double | keyword geo_point | keyword geo_shape | keyword diff --git a/docs/reference/esql/functions/types/values.asciidoc b/docs/reference/esql/functions/types/values.asciidoc index 564fb8dc3bfb0..adf457dac31b8 100644 --- a/docs/reference/esql/functions/types/values.asciidoc +++ b/docs/reference/esql/functions/types/values.asciidoc @@ -7,6 +7,7 @@ field | result boolean | boolean date | date +date_nanos | date_nanos double | double integer | integer ip | ip diff --git a/docs/reference/esql/implicit-casting.asciidoc b/docs/reference/esql/implicit-casting.asciidoc index f0c0aa3d82063..b24be0b645472 100644 --- a/docs/reference/esql/implicit-casting.asciidoc +++ b/docs/reference/esql/implicit-casting.asciidoc @@ -5,7 +5,7 @@ Implicit casting ++++ -Often users will input `datetime`, `ip`, `version`, or geospatial objects as simple strings in their queries for use in predicates, functions, or expressions. {esql} provides <> to explicitly convert these strings into the desired data types. +Often users will input `date`, `date_period`, `time_duration`, `ip` or `version` as simple strings in their queries for use in predicates, functions, or expressions. {esql} provides <> to explicitly convert these strings into the desired data types. Without implicit casting users must explicitly code these `to_X` functions in their queries, when string literals don't match the target data types they are assigned or compared to. Here is an example of using `to_datetime` to explicitly perform a data type conversion. 
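A sketch of the kind of explicit conversion meant here (an illustration only; the `employees` index and its `birth_date` field are assumed from the surrounding examples, and the file's own example follows in the hunk below):

[source,esql]
----
FROM employees
| WHERE birth_date > TO_DATETIME("1950-01-01T00:00:00Z")
| KEEP first_name, last_name, birth_date
| LIMIT 1
----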
@@ -18,7 +18,10 @@ FROM employees | LIMIT 1 ---- -Implicit casting improves usability, by automatically converting string literals to the target data type. This is most useful when the target data type is `datetime`, `ip`, `version` or a geo spatial. It is natural to specify these as a string in queries. +[discrete] +[[esql-implicit-casting-example]] +==== Implicit casting example +Implicit casting automatically converts string literals to the target data type. This allows users to specify string values for types like `date`, `date_period`, `time_duration`, `ip` and `version` in their queries. The first query can be coded without calling the `to_datetime` function, as follows: @@ -31,23 +34,36 @@ FROM employees | LIMIT 1 ---- -[float] -=== Implicit casting support +[discrete] +[[esql-implicit-casting-supported-operations]] +==== Operations that support implicit casting The following table details which {esql} operations support implicit casting for different data types. [%header.monospaced.styled,format=dsv,separator=|] |=== -||ScalarFunction|BinaryComparison|ArithmeticOperation|InListPredicate|AggregateFunction -|DATETIME|Y|Y|Y|Y|N -|DOUBLE|Y|N|N|N|N -|LONG|Y|N|N|N|N -|INTEGER|Y|N|N|N|N -|IP|Y|Y|Y|Y|N -|VERSION|Y|Y|Y|Y|N -|GEO_POINT|Y|N|N|N|N -|GEO_SHAPE|Y|N|N|N|N -|CARTESIAN_POINT|Y|N|N|N|N -|CARTESIAN_SHAPE|Y|N|N|N|N -|BOOLEAN|Y|Y|Y|Y|N +|ScalarFunctions|Operators|<>|<> +DATE|Y|Y|Y|N +DATE_PERIOD/TIME_DURATION|Y|N|Y|N +IP|Y|Y|Y|N +VERSION|Y|Y|Y|N +BOOLEAN|Y|Y|Y|N |=== + +ScalarFunctions includes: + +* <> + +* <> + +* <> + + +Operators includes: + +* <> + +* <> + +* <> + diff --git a/docs/reference/esql/processing-commands/inlinestats.asciidoc b/docs/reference/esql/processing-commands/inlinestats.disabled similarity index 100% rename from docs/reference/esql/processing-commands/inlinestats.asciidoc rename to docs/reference/esql/processing-commands/inlinestats.disabled diff --git a/docs/reference/esql/processing-commands/lookup.asciidoc b/docs/reference/esql/processing-commands/lookup.disabled similarity index 100% rename from docs/reference/esql/processing-commands/lookup.asciidoc rename to docs/reference/esql/processing-commands/lookup.disabled diff --git a/docs/reference/esql/processing-commands/stats.asciidoc b/docs/reference/esql/processing-commands/stats.asciidoc index 0c479c1f62b76..3ed296fb6db24 100644 --- a/docs/reference/esql/processing-commands/stats.asciidoc +++ b/docs/reference/esql/processing-commands/stats.asciidoc @@ -1,16 +1,18 @@ [discrete] [[esql-stats-by]] -=== `STATS ... BY` +=== `STATS` -The `STATS ... BY` processing command groups rows according to a common value +The `STATS` processing command groups rows according to a common value and calculates one or more aggregated values over the grouped rows. **Syntax** [source,esql] ---- -STATS [column1 =] expression1[, ..., [columnN =] expressionN] -[BY grouping_expression1[, ..., grouping_expressionN]] +STATS [column1 =] expression1 [WHERE boolean_expression1][, + ..., + [columnN =] expressionN [WHERE boolean_expressionN]] + [BY grouping_expression1[, ..., grouping_expressionN]] ---- *Parameters* @@ -28,14 +30,18 @@ An expression that computes an aggregated value. An expression that outputs the values to group by. If its name coincides with one of the computed columns, that column will be ignored. +`boolean_expressionX`:: +The condition that must be met for a row to be included in the evaluation of `expressionX`. + NOTE: Individual `null` values are skipped when computing aggregations. *Description* -The `STATS ... 
BY` processing command groups rows according to a common value -and calculate one or more aggregated values over the grouped rows. If `BY` is -omitted, the output table contains exactly one row with the aggregations applied -over the entire dataset. +The `STATS` processing command groups rows according to a common value +and calculates one or more aggregated values over the grouped rows. For the +calculation of each aggregated value, the rows in a group can be filtered with +`WHERE`. If `BY` is omitted, the output table contains exactly one row with +the aggregations applied over the entire dataset. The following <> are supported: @@ -90,6 +96,29 @@ include::{esql-specs}/stats.csv-spec[tag=statsCalcMultipleValues] include::{esql-specs}/stats.csv-spec[tag=statsCalcMultipleValues-result] |=== +To filter the rows that go into an aggregation, use the `WHERE` clause: + +[source.merge.styled,esql] +---- +include::{esql-specs}/stats.csv-spec[tag=aggFiltering] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats.csv-spec[tag=aggFiltering-result] +|=== + +Aggregations with and without a filter can be mixed, and grouping is +optional as well: + +[source.merge.styled,esql] +---- +include::{esql-specs}/stats.csv-spec[tag=aggFilteringNoGroup] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats.csv-spec[tag=aggFilteringNoGroup-result] +|=== + [[esql-stats-mv-group]] If the grouping key is multivalued then the input row is in all groups: @@ -109,7 +138,7 @@ It's also possible to group by multiple values: include::{esql-specs}/stats.csv-spec[tag=statsGroupByMultipleValues] ---- -If the all grouping keys are multivalued then the input row is in all groups: +If all the grouping keys are multivalued then the input row is in all groups: [source.merge.styled,esql] ---- @@ -121,7 +150,7 @@ include::{esql-specs}/stats.csv-spec[tag=multi-mv-group-result] |=== Both the aggregating functions and the grouping expressions accept other -functions. This is useful for using `STATS...BY` on multivalue columns. +functions. This is useful for using `STATS` on multivalue columns. For example, to calculate the average salary change, you can use `MV_AVG` to first average the multiple values per employee, and use the result with the `AVG` function: diff --git a/docs/reference/esql/time-spans.asciidoc b/docs/reference/esql/time-spans.asciidoc new file mode 100644 index 0000000000000..d2aa0c4fa252e --- /dev/null +++ b/docs/reference/esql/time-spans.asciidoc @@ -0,0 +1,111 @@ +[[esql-time-spans]] +=== {esql} time spans + +++++ +Time spans +++++ + +Time spans represent intervals between two datetime values. There are currently two supported types of time spans: + +* `DATE_PERIOD` specifies intervals in years, quarters, months, weeks and days +* `TIME_DURATION` specifies intervals in hours, minutes, seconds and milliseconds + +A time span requires two elements: an integer value and a temporal unit. + +Time spans work with grouping functions such as <>, scalar functions such as <> and arithmetic operators such as <> and <>. Convert strings to time spans using <>, <>, or the cast operators `::DATE_PERIOD`, `::TIME_DURATION`.
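To make the two forms concrete, a hedged sketch combining a literal time span (an integer value followed by a temporal unit) with a string cast (the `employees` index is assumed, as in the examples that follow):

[source,esql]
----
FROM employees
// DATE_PERIOD literal: an integer value plus a temporal unit
| WHERE hire_date > NOW() - 1 year
// a string converted to a TIME_DURATION via the cast operator
| EVAL an_hour_later = hire_date + "1 hour"::TIME_DURATION
----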
+ +[discrete] +[[esql-time-spans-examples]] +==== Examples of using time spans in {esql} + + +With `BUCKET`: +[source.merge.styled,esql] +---- +include::{esql-specs}/bucket.csv-spec[tag=docsBucketWeeklyHistogramWithSpan] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/bucket.csv-spec[tag=docsBucketWeeklyHistogramWithSpan-result] +|=== + + +With `DATE_TRUNC`: +[source.merge.styled,esql] +---- +include::{esql-specs}/date.csv-spec[tag=docsDateTrunc] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/date.csv-spec[tag=docsDateTrunc-result] +|=== + + +With `+` and/or `-`: +[source.merge.styled,esql] +---- +include::{esql-specs}/date.csv-spec[tag=docsNowWhere] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/date.csv-spec[tag=docsNowWhere-result] +|=== + + +When a time span is provided as a named parameter in string format, `TO_DATEPERIOD`, `::DATE_PERIOD`, `TO_TIMEDURATION` or `::TIME_DURATION` can be used to convert to its corresponding time span value for arithmetic operations like `+` and/or `-`. +[source, esql] +---- +POST /_query +{ + "query": """ + FROM employees + | EVAL x = hire_date + ?timespan::DATE_PERIOD, y = hire_date - TO_DATEPERIOD(?timespan) + """, + "params": [{"timespan" : "1 day"}] +} +---- + +When a time span is provided as a named parameter in string format, it can be automatically converted to its corresponding time span value in grouping functions and scalar functions, like `BUCKET` and `DATE_TRUNC`. +[source, esql] +---- +POST /_query +{ + "query": """ + FROM employees + | WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" + | STATS hires_per_week = COUNT(*) BY week = BUCKET(hire_date, ?timespan) + | SORT week + """, + "params": [{"timespan" : "1 week"}] +} +---- + +[source, esql] +---- +POST /_query +{ + "query": """ + FROM employees + | KEEP first_name, last_name, hire_date + | EVAL year_hired = DATE_TRUNC(?timespan, hire_date) + """, + "params": [{"timespan" : "1 year"}] +} +---- + +[discrete] +[[esql-time-spans-table]] +==== Supported temporal units +[%header.monospaced.styled,format=dsv,separator=|] +|=== +Temporal Units|Valid Abbreviations +year|y, yr, years +quarter|q, quarters +month|mo, months +week|w, weeks +day|d, days +hour|h, hours +minute|min, minutes +second|s, sec, seconds +millisecond|ms, milliseconds +|=== diff --git a/docs/reference/geospatial-analysis.asciidoc b/docs/reference/geospatial-analysis.asciidoc index 6760040e14bc7..678e0ee17aec2 100644 --- a/docs/reference/geospatial-analysis.asciidoc +++ b/docs/reference/geospatial-analysis.asciidoc @@ -38,11 +38,11 @@ Data is often messy and incomplete. <> lets you clean, <> has support for <> functions, enabling efficient index searching for documents that intersect with, are within, are contained by, or are disjoint from a query geometry. In addition, the `ST_DISTANCE` function calculates the distance between two points. -* experimental:[] <> -* experimental:[] <> -* experimental:[] <> -* experimental:[] <> -* experimental:[] <> +* <> +* <> +* <> +* <> +* <> [discrete] [[geospatial-aggregate]] diff --git a/docs/reference/how-to.asciidoc b/docs/reference/how-to.asciidoc index ec514eb05be29..eeac2fe5c2f50 100644 --- a/docs/reference/how-to.asciidoc +++ b/docs/reference/how-to.asciidoc @@ -1,23 +1,21 @@ [[how-to]] -= How to += Optimizations -[partintro] --- -Elasticsearch ships with defaults which are intended to give a good out of -the box experience. 
Full text search, highlighting, aggregations, and indexing -should all just work without the user having to change anything. +Elasticsearch's default settings provide a good out-of-box experience for basic operations like full text search, highlighting, aggregations, and indexing. -Once you better understand how you want to use Elasticsearch, however, -there are a number of optimizations you can make to improve performance -for your use case. +However, there are a number of optimizations you can make to improve performance for your use case. -This section provides guidance about which changes should and shouldn't be -made. --- +This section provides recommendations for various use cases. -include::how-to/general.asciidoc[] +* <> +* <> +* <> +* <> +* <> +* <> +* <> -include::how-to/recipes.asciidoc[] +include::how-to/general.asciidoc[] include::how-to/indexing-speed.asciidoc[] diff --git a/docs/reference/how-to/indexing-speed.asciidoc b/docs/reference/how-to/indexing-speed.asciidoc index 12de469c68449..d4cdb85e4d624 100644 --- a/docs/reference/how-to/indexing-speed.asciidoc +++ b/docs/reference/how-to/indexing-speed.asciidoc @@ -112,7 +112,7 @@ different nodes so there's redundancy for any node failures. You can also use insurance. [discrete] -==== Local vs.remote storage +==== Local vs. remote storage include::./remote-storage.asciidoc[] diff --git a/docs/reference/how-to/recipes.asciidoc b/docs/reference/how-to/recipes.asciidoc index b46f624aef51d..de23404be6164 100644 --- a/docs/reference/how-to/recipes.asciidoc +++ b/docs/reference/how-to/recipes.asciidoc @@ -1,7 +1,7 @@ [[recipes]] -== Recipes +== Search relevance optimizations -This section includes a few recipes to help with common problems: +This section includes a few recipes to help with common search relevance issues: * <> * <> diff --git a/docs/reference/how-to/recipes/scoring.asciidoc b/docs/reference/how-to/recipes/scoring.asciidoc index 5c5a8977d34d4..a578826e31fac 100644 --- a/docs/reference/how-to/recipes/scoring.asciidoc +++ b/docs/reference/how-to/recipes/scoring.asciidoc @@ -88,8 +88,9 @@ pages independently of the query. There are two main queries that allow combining static score contributions with textual relevance, eg. as computed with BM25: - - <> - - <> + +* <> +* <> For instance imagine that you have a `pagerank` field that you wish to combine with the BM25 score so that the final score is equal to diff --git a/docs/reference/ilm/error-handling.asciidoc b/docs/reference/ilm/error-handling.asciidoc index e8df44653e9c5..911dc8b9cce40 100644 --- a/docs/reference/ilm/error-handling.asciidoc +++ b/docs/reference/ilm/error-handling.asciidoc @@ -8,8 +8,9 @@ When this happens, {ilm-init} moves the index to an `ERROR` step. If {ilm-init} cannot resolve the error automatically, execution is halted until you resolve the underlying issues with the policy, index, or cluster. -See this https://www.youtube.com/watch?v=VCIqkji3IwY[{ilm-init} health video] -for example troubleshooting walkthrough. +See https://www.youtube.com/watch?v=VCIqkji3IwY[this video] +for a walkthrough of troubleshooting current {ilm-init} health issues, and https://www.youtube.com/watch?v=onrnnwjYWSQ[this video] +for a walkthrough of troubleshooting historical {ilm-init} issues. 
For example, you might have a `shrink-index` policy that shrinks an index to four shards once it is at least five days old: diff --git a/docs/reference/images/esql/esql-discover-query-history.png b/docs/reference/images/esql/esql-discover-query-history.png index ff1d2ffa8b280..eb064684af700 100644 Binary files a/docs/reference/images/esql/esql-discover-query-history.png and b/docs/reference/images/esql/esql-discover-query-history.png differ diff --git a/docs/reference/images/esql/esql-discover-query-starred.png b/docs/reference/images/esql/esql-discover-query-starred.png new file mode 100644 index 0000000000000..525aa9acbea28 Binary files /dev/null and b/docs/reference/images/esql/esql-discover-query-starred.png differ diff --git a/docs/reference/inference/service-elasticsearch.asciidoc b/docs/reference/inference/service-elasticsearch.asciidoc index 0103b425faefe..cd06e6d7b2f64 100644 --- a/docs/reference/inference/service-elasticsearch.asciidoc +++ b/docs/reference/inference/service-elasticsearch.asciidoc @@ -69,15 +69,15 @@ include::inference-shared.asciidoc[tag=service-settings] These settings are specific to the `elasticsearch` service. -- -`adaptive_allocations`::: -(Optional, object) -include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation] - `deployment_id`::: (Optional, string) The `deployment_id` of an existing trained model deployment. When `deployment_id` is used the `model_id` is optional. +`adaptive_allocations`::: +(Optional, object) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation] + `enabled`:::: (Optional, Boolean) include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=adaptive-allocation-enabled] @@ -119,7 +119,6 @@ include::inference-shared.asciidoc[tag=task-settings] Returns the document instead of only the index. Defaults to `true`. ===== - [discrete] [[inference-example-elasticsearch-elser]] ==== ELSER via the `elasticsearch` service @@ -137,7 +136,7 @@ PUT _inference/sparse_embedding/my-elser-model "adaptive_allocations": { <1> "enabled": true, "min_number_of_allocations": 1, - "max_number_of_allocations": 10 + "max_number_of_allocations": 4 }, "num_threads": 1, "model_id": ".elser_model_2" <2> @@ -150,6 +149,34 @@ PUT _inference/sparse_embedding/my-elser-model Valid values are `.elser_model_2` and `.elser_model_2_linux-x86_64`. For further details, refer to the {ml-docs}/ml-nlp-elser.html[ELSER model documentation]. +[discrete] +[[inference-example-elastic-reranker]] +==== Elastic Rerank via the `elasticsearch` service + +The following example shows how to create an {infer} endpoint called `my-elastic-rerank` to perform a `rerank` task type using the built-in Elastic Rerank cross-encoder model. + +The API request below will automatically download the Elastic Rerank model if it isn't already downloaded and then deploy the model. +Once deployed, the model can be used for semantic re-ranking with a <>. + +[source,console] +------------------------------------------------------------ +PUT _inference/rerank/my-elastic-rerank +{ + "service": "elasticsearch", + "service_settings": { + "model_id": ".rerank-v1", <1> + "num_threads": 1, + "adaptive_allocations": { <2> + "enabled": true, + "min_number_of_allocations": 1, + "max_number_of_allocations": 4 + } + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> The `model_id` must be the ID of the built-in Elastic Rerank model: `.rerank-v1`. 
+<2> {ml-docs}/ml-nlp-auto-scale.html#nlp-model-adaptive-allocations[Adaptive allocations] will be enabled with the minimum of 1 and the maximum of 4 allocations. [discrete] [[inference-example-elasticsearch]] @@ -186,7 +213,7 @@ If using the Python client, you can set the `timeout` parameter to a higher valu [discrete] [[inference-example-eland]] -==== Models uploaded by Eland via the elasticsearch service +==== Models uploaded by Eland via the `elasticsearch` service The following example shows how to create an {infer} endpoint called `my-msmarco-minilm-model` to perform a `text_embedding` task type. diff --git a/docs/reference/inference/service-elser.asciidoc b/docs/reference/inference/service-elser.asciidoc index 262bdfbca002f..c1cc23c8c9adb 100644 --- a/docs/reference/inference/service-elser.asciidoc +++ b/docs/reference/inference/service-elser.asciidoc @@ -102,10 +102,39 @@ If `adaptive_allocations` is enabled, do not set this value, because it's automa Sets the number of threads used by each model allocation during inference. This generally increases the speed per inference request. The inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node. Must be a power of 2. Max allowed value is 32. +[discrete] +[[inference-example-elser-adaptive-allocation]] +==== ELSER service example with adaptive allocations + +When adaptive allocations are enabled, the number of allocations of the model is set automatically based on the current load. + +NOTE: For more information on how to optimize your ELSER endpoints, refer to {ml-docs}/ml-nlp-elser.html#elser-recommendations[the ELSER recommendations] section in the model documentation. +To learn more about model autoscaling, refer to the {ml-docs}/ml-nlp-auto-scale.html[trained model autoscaling] page. + +The following example shows how to create an {infer} endpoint called `my-elser-model` to perform a `sparse_embedding` task type and configure adaptive allocations. + +The request below will automatically download the ELSER model if it isn't already downloaded and then deploy the model. + +[source,console] +------------------------------------------------------------ +PUT _inference/sparse_embedding/my-elser-model +{ + "service": "elser", + "service_settings": { + "adaptive_allocations": { + "enabled": true, + "min_number_of_allocations": 3, + "max_number_of_allocations": 10 + }, + "num_threads": 1 + } +} +------------------------------------------------------------ +// TEST[skip:TBD] [discrete] [[inference-example-elser]] -==== ELSER service example +==== ELSER service example without adaptive allocations The following example shows how to create an {infer} endpoint called `my-elser-model` to perform a `sparse_embedding` task type. Refer to the {ml-docs}/ml-nlp-elser.html[ELSER model documentation] for more info. @@ -151,32 +180,4 @@ You might see a 502 bad gateway error in the response when using the {kib} Conso This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the {ml-app} UI. If using the Python client, you can set the `timeout` parameter to a higher value. -==== - -[discrete] -[[inference-example-elser-adaptive-allocation]] -==== Setting adaptive allocations for the ELSER service - -NOTE: For more information on how to optimize your ELSER endpoints, refer to {ml-docs}/ml-nlp-elser.html#elser-recommendations[the ELSER recommendations] section in the model documentation. 
-To learn more about model autoscaling, refer to the {ml-docs}/ml-nlp-auto-scale.html[trained model autoscaling] page. - -The following example shows how to create an {infer} endpoint called `my-elser-model` to perform a `sparse_embedding` task type and configure adaptive allocations. - -The request below will automatically download the ELSER model if it isn't already downloaded and then deploy the model. - -[source,console] ------------------------------------------------------------- -PUT _inference/sparse_embedding/my-elser-model -{ - "service": "elser", - "service_settings": { - "adaptive_allocations": { - "enabled": true, - "min_number_of_allocations": 3, - "max_number_of_allocations": 10 - }, - "num_threads": 1 - } -} ------------------------------------------------------------- -// TEST[skip:TBD] +==== \ No newline at end of file diff --git a/docs/reference/intro.asciidoc b/docs/reference/intro.asciidoc index 2908c55789bab..e0100b1c5640b 100644 --- a/docs/reference/intro.asciidoc +++ b/docs/reference/intro.asciidoc @@ -85,7 +85,7 @@ You can deploy {es} in various ways. **Hosted options** * {cloud}/ec-getting-started-trial.html[*Elastic Cloud Hosted*]: {es} is available as part of the hosted Elastic Stack offering, deployed in the cloud with your provider of choice. Sign up for a https://cloud.elastic.co/registration[14-day free trial]. -* {serverless-docs}/general/sign-up-trial[*Elastic Cloud Serverless* (technical preview)]: Create serverless projects for autoscaled and fully managed {es} deployments. Sign up for a https://cloud.elastic.co/serverless-registration[14-day free trial]. +* {serverless-docs}/general/sign-up-trial[*Elastic Cloud Serverless*]: Create serverless projects for autoscaled and fully managed {es} deployments. Sign up for a https://cloud.elastic.co/serverless-registration[14-day free trial]. **Advanced options** diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index 44f90eded8632..e6e11d6dd539f 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -117,12 +117,10 @@ that sacrifices result accuracy for improved speed. The `dense_vector` type supports quantization to reduce the memory footprint required when <> `float` vectors. The three following quantization strategies are supported: -+ --- -`int8` - Quantizes each dimension of the vector to 1-byte integers. This reduces the memory footprint by 75% (or 4x) at the cost of some accuracy. -`int4` - Quantizes each dimension of the vector to half-byte integers. This reduces the memory footprint by 87% (or 8x) at the cost of accuracy. -`bbq` - experimental:[] Better binary quantization which reduces each dimension to a single bit precision. This reduces the memory footprint by 96% (or 32x) at a larger cost of accuracy. Generally, oversampling during query time and reranking can help mitigate the accuracy loss. --- +* `int8` - Quantizes each dimension of the vector to 1-byte integers. This reduces the memory footprint by 75% (or 4x) at the cost of some accuracy. +* `int4` - Quantizes each dimension of the vector to half-byte integers. This reduces the memory footprint by 87% (or 8x) at the cost of accuracy. +* `bbq` - experimental:[] Better binary quantization which reduces each dimension to a single bit precision. This reduces the memory footprint by 96% (or 32x) at a larger cost of accuracy. 
Generally, oversampling during query time and reranking can help mitigate the accuracy loss. + When using a quantized format, you may want to oversample and rescore the results to improve accuracy. See <> for more information. @@ -245,12 +243,11 @@ their vector field's similarity to the query vector. The `_score` of each document will be derived from the similarity, in a way that ensures scores are positive and that a larger score corresponds to a higher ranking. Defaults to `l2_norm` when `element_type: bit` otherwise defaults to `cosine`. - -NOTE: `bit` vectors only support `l2_norm` as their similarity metric. - + ^*^ This parameter can only be specified when `index` is `true`. + +NOTE: `bit` vectors only support `l2_norm` as their similarity metric. + .Valid values for `similarity` [%collapsible%open] ==== @@ -341,12 +338,12 @@ by 32x at the cost of accuracy. See <> to smaller secti The `semantic_text` field type specifies an inference endpoint identifier that will be used to generate embeddings. You can create the inference endpoint by using the <>. This field type and the <> type make it simpler to perform semantic search on your data. +If you don't specify an inference endpoint, the <> is used by default. Using `semantic_text`, you won't need to specify how to generate embeddings for your data, or how to index it. The {infer} endpoint automatically determines the embedding generation, indexing, and query to use. +If you use the ELSER service, you can set up `semantic_text` with the following API request: + [source,console] ------------------------------------------------------------ PUT my-index-000001 +{ + "mappings": { + "properties": { + "inference_field": { + "type": "semantic_text" + } + } + } +} +------------------------------------------------------------ + +If you use a service other than ELSER, you must create an {infer} endpoint using the <> and reference it when setting up `semantic_text` as the following example demonstrates: + +[source,console] +------------------------------------------------------------ +PUT my-index-000002 { "mappings": { "properties": { "inference_field": { "type": "semantic_text", - "inference_id": "my-elser-endpoint" + "inference_id": "my-openai-endpoint" <1> } } } } ------------------------------------------------------------ // TEST[skip:Requires inference endpoint] +<1> The `inference_id` of the {infer} endpoint to use to generate embeddings. The recommended way to use semantic_text is by having dedicated {infer} endpoints for ingestion and search. @@ -40,7 +60,7 @@ After creating dedicated {infer} endpoints for both, you can reference them usin [source,console] ------------------------------------------------------------ -PUT my-index-000002 +PUT my-index-000003 { "mappings": { "properties": { diff --git a/docs/reference/migration/migrate_8_18.asciidoc b/docs/reference/migration/migrate_8_18.asciidoc new file mode 100644 index 0000000000000..c989ff9f85b6d --- /dev/null +++ b/docs/reference/migration/migrate_8_18.asciidoc @@ -0,0 +1,20 @@ +[[migrating-8.18]] +== Migrating to 8.18 +++++ +8.18 +++++ + +This section discusses the changes that you need to be aware of when migrating +your application to {es} 8.18. + +See also <> and <>. + +coming::[8.18.0] + + +[discrete] +[[breaking-changes-8.18]] +=== Breaking changes + +There are no breaking changes in {es} 8.18. 
+ diff --git a/docs/reference/migration/migrate_9_0.asciidoc b/docs/reference/migration/migrate_9_0.asciidoc index 6569647fd993e..5048220966bba 100644 --- a/docs/reference/migration/migrate_9_0.asciidoc +++ b/docs/reference/migration/migrate_9_0.asciidoc @@ -1,6 +1,3 @@ -// THIS IS A GENERATED FILE. DO NOT EDIT DIRECTLY. -// The content generated here are is not correct and most has been manually commented out until it can be fixed. -// See ES-9931 for more details. [[migrating-9.0]] == Migrating to 9.0 ++++ @@ -23,204 +20,229 @@ The following changes in {es} 9.0 might affect your applications and prevent them from operating normally. Before upgrading to 9.0, review these changes and take the described steps to mitigate the impact. -// -// [discrete] -// [[breaking_90_analysis_changes]] -// ==== Analysis changes -// -// [[set_lenient_to_true_by_default_when_using_updateable_synonyms]] -// .Set lenient to true by default when using updateable synonyms -// [%collapsible] -// ==== -// *Details* + -// When a `synonym` or `synonym_graph` token filter is configured with `updateable: true`, the default `lenient` -// value will now be `true`. -// -// *Impact* + -// `synonym` or `synonym_graph` token filters configured with `updateable: true` will ignore invalid synonyms by -// default. This prevents shard initialization errors on invalid synonyms. -// ==== -// -// [discrete] -// [[breaking_90_mapping_changes]] -// ==== Mapping changes -// -// [[jdk_locale_database_change]] -// .JDK locale database change -// [%collapsible] -// ==== -// *Details* + -// {es} 8.16 changes the version of the JDK that is included from version 22 to version 23. This changes the locale database that is used by Elasticsearch from the COMPAT database to the CLDR database. This change can cause significant differences to the textual date formats accepted by Elasticsearch, and to calculated week-dates. -// -// If you run {es} 8.16 on JDK version 22 or below, it will use the COMPAT locale database to match the behavior of 8.15. However, starting with {es} 9.0, {es} will use the CLDR database regardless of JDK version it is run on. -// -// *Impact* + -// This affects you if you use custom date formats using textual or week-date field specifiers. If you use date fields or calculated week-dates that change between the COMPAT and CLDR databases, then this change will cause Elasticsearch to reject previously valid date fields as invalid data. You might need to modify your ingest or output integration code to account for the differences between these two JDK versions. -// -// Starting in version 8.15.2, Elasticsearch will log deprecation warnings if you are using date format specifiers that might change on upgrading to JDK 23. These warnings are visible in Kibana. -// -// For detailed guidance, refer to <> and the https://ela.st/jdk-23-locales[Elastic blog]. -// ==== -// -// [discrete] -// [[breaking_90_analysis_changes]] -// ==== Analysis changes -// -// [[snowball_stemmers_have_been_upgraded]] -// .Snowball stemmers have been upgraded -// [%collapsible] -// ==== -// *Details* + -// Lucene 10 ships with an upgrade of its Snowball stemmers. For details see https://github.com/apache/lucene/issues/13209. Users using Snowball stemmers that are experiencing changes in search behaviour on existing data are advised to reindex. -// -// *Impact* + -// The upgrade should generally provide improved stemming results. 
Small changes in token analysis can lead to mismatches with previously index data, so existing indices using Snowball stemmers as part of their analysis chain should be reindexed. -// ==== -// -// [[german2_snowball_stemmer_an_alias_for_german_stemmer]] -// .The "german2" snowball stemmer is now an alias for the "german" stemmer -// [%collapsible] -// ==== -// *Details* + -// Lucene 10 has merged the improved "german2" snowball language stemmer with the "german" stemmer. For Elasticsearch, "german2" is now a deprecated alias for "german". This may results in slightly different tokens being generated for terms with umlaut substitution (like "ue" for "ü" etc...) -// -// *Impact* + -// Replace usages of "german2" with "german" in analysis configuration. Old indices that use the "german" stemmer should be reindexed if possible. -// ==== -// -// [[persian_analyzer_has_stemmer_by_default]] -// .The 'persian' analyzer has stemmer by default -// [%collapsible] -// ==== -// *Details* + -// Lucene 10 has added a final stemming step to its PersianAnalyzer that Elasticsearch exposes as 'persian' analyzer. Existing indices will keep the old non-stemming behaviour while new indices will see the updated behaviour with added stemming. Users that wish to maintain the non-stemming behaviour need to define their own analyzer as outlined in https://www.elastic.co/guide/en/elasticsearch/reference/8.15/analysis-lang-analyzer.html#persian-analyzer. Users that wish to use the new stemming behaviour for existing indices will have to reindex their data. -// -// *Impact* + -// Indexing with the 'persian' analyzer will produce slightly different tokens. Users should check if this impacts their search results. If they wish to maintain the legacy non-stemming behaviour they can define their own analyzer equivalent as explained in https://www.elastic.co/guide/en/elasticsearch/reference/8.15/analysis-lang-analyzer.html#persian-analyzer. -// ==== -// -// [[korean_dictionary_for_nori_has_been_updated]] -// .The Korean dictionary for Nori has been updated -// [%collapsible] -// ==== -// *Details* + -// Lucene 10 ships with an updated Korean dictionary (mecab-ko-dic-2.1.1). For details see https://github.com/apache/lucene/issues/11452. Users experiencing changes in search behaviour on existing data are advised to reindex. -// -// *Impact* + -// The change is small and should generally provide better analysis results. Existing indices for full-text use cases should be reindexed though. -// ==== -// -// [discrete] -// [[breaking_90_cluster_and_node_setting_changes]] -// ==== Cluster and node setting changes -// -// [[remove_unsupported_legacy_value_for_discovery_type]] -// .Remove unsupported legacy value for `discovery.type` -// [%collapsible] -// ==== -// *Details* + -// Earlier versions of {es} had a `discovery.type` setting which permitted values that referred to legacy discovery types. From v9.0.0 onwards, the only supported values for this setting are `multi-node` (the default) and `single-node`. -// -// *Impact* + -// Remove any value for `discovery.type` from your `elasticsearch.yml` configuration file. -// ==== -// -// [discrete] -// [[breaking_90_es_ql_changes]] -// ==== ES|QL changes -// -// [[esql_entirely_remove_meta_functions]] -// .ESQL: Entirely remove META FUNCTIONS -// [%collapsible] -// ==== -// *Details* + -// Removes an undocumented syntax from ESQL: META FUNCTION. This was never -// reliable or really useful. Consult the documentation instead. 
-// -// *Impact* + -// Removes an undocumented syntax from ESQL: META FUNCTION -// ==== -// -// [discrete] -// [[breaking_90_rest_api_changes]] -// ==== REST API changes -// -// [[remove_cluster_state_from_cluster_reroute_response]] -// .Remove cluster state from `/_cluster/reroute` response -// [%collapsible] -// ==== -// *Details* + -// The `POST /_cluster/reroute` API no longer returns the cluster state in its response. The `?metric` query parameter to this API now has no effect and its use will be forbidden in a future version. -// -// *Impact* + -// Cease usage of the `?metric` query parameter when calling the `POST /_cluster/reroute` API. -// ==== -// -// [[remove_deprecated_local_attribute_from_alias_apis]] -// .Remove deprecated local attribute from alias APIs -// [%collapsible] -// ==== -// *Details* + -// The following APIs no longer accept the `?local` query parameter: `GET /_alias`, `GET /_aliases`, `GET /_alias/{name}`, `HEAD /_alias/{name}`, `GET /{index}/_alias`, `HEAD /{index}/_alias`, `GET /{index}/_alias/{name}`, `HEAD /{index}/_alias/{name}`, `GET /_cat/aliases`, and `GET /_cat/aliases/{alias}`. This parameter has been deprecated and ignored since version 8.12. -// -// *Impact* + -// Cease usage of the `?local` query parameter when calling the listed APIs. -// ==== -// -// [[reworking_rrf_retriever_to_be_evaluated_during_rewrite_phase]] -// .Reworking RRF retriever to be evaluated during rewrite phase -// [%collapsible] -// ==== -// *Details* + -// In this release (8.16), we have introduced major changes to the retrievers framework -// and how they can be evaluated, focusing mainly on compound retrievers -// like `rrf` and `text_similarity_reranker`, which allowed us to support full -// composability (i.e. any retriever can be nested under any compound retriever), -// as well as supporting additional search features like collapsing, explaining, -// aggregations, and highlighting. -// -// To ensure consistency, and given that this rework is not available until 8.16, -// `rrf` and `text_similarity_reranker` retriever queries would now -// throw an exception in a mixed cluster scenario, where there are nodes -// both in current or later (i.e. >= 8.16) and previous ( <= 8.15) versions. -// -// As part of the rework, we have also removed the `_rank` property from -// the responses of an `rrf` retriever. -// -// *Impact* + -// - Users will not be able to use the `rrf` and `text_similarity_reranker` retrievers in a mixed cluster scenario -// with previous releases (i.e. prior to 8.16), and the request will throw an `IllegalArgumentException`. -// - `_rank` has now been removed from the output of the `rrf` retrievers so trying to directly parse the field -// will throw an exception -// ==== -// -// [[update_data_stream_lifecycle_telemetry_to_track_global_retention]] -// .Update data stream lifecycle telemetry to track global retention -// [%collapsible] -// ==== -// *Details* + -// In this release we introduced global retention settings that fulfil the following criteria: -// -// - a data stream managed by the data stream lifecycle, -// - a data stream that is not an internal data stream. -// -// As a result, we defined different types of retention: -// -// - **data retention**: the retention configured on data stream level by the data stream user or owner -// - **default global retention:** the retention configured by an admin on a cluster level and applied to any -// data stream that doesn't have data retention and fulfils the criteria. 
-// - **max global retention:** the retention configured by an admin to guard against having long retention periods. -// Any data stream that fulfills the criteria will adhere to the data retention unless it exceeds the max retention, -// in which case the max global retention applies. -// - **effective retention:** the retention that applies on the data stream that fulfill the criteria at a given moment -// in time. It takes into consideration all the retention above and resolves it to the retention that will take effect. -// -// Considering the above changes, having a field named `retention` in the usage API was confusing. For this reason, we -// renamed it to `data_retention` and added telemetry about the other configurations too. -// -// *Impact* + -// Users that use the field `data_lifecycle.retention` should use the `data_lifecycle.data_retention` -// ==== + + +There are no notable breaking changes in {es} 9.0. +There are, however, some less critical breaking changes, described below. + +[discrete] +[[breaking_90_analysis_changes]] +==== Analysis changes + +[[snowball_stemmers_have_been_upgraded]] +.Snowball stemmers have been upgraded +[%collapsible] +==== +*Details* + +Lucene 10 ships with an upgrade of its Snowball stemmers. For details see https://github.com/apache/lucene/issues/13209. Users of Snowball stemmers who experience changes in search behaviour on existing data are advised to reindex. + +*Impact* + +The upgrade should generally provide improved stemming results. Small changes in token analysis can lead to mismatches with previously indexed data, so existing indices using Snowball stemmers as part of their analysis chain should be reindexed. +==== + +[[german2_snowball_stemmer_an_alias_for_german_stemmer]] +.The "german2" snowball stemmer is now an alias for the "german" stemmer +[%collapsible] +==== +*Details* + +Lucene 10 has merged the improved "german2" snowball language stemmer with the "german" stemmer. For Elasticsearch, "german2" is now a deprecated alias for "german". This may result in slightly different tokens being generated for terms with umlaut substitution (like "ue" for "ü"). + +*Impact* + +Replace usages of "german2" with "german" in analysis configuration. Old indices that use the "german" stemmer should be reindexed if possible. +==== + +[[persian_analyzer_has_stemmer_by_default]] +.The 'persian' analyzer has stemmer by default +[%collapsible] +==== +*Details* + +Lucene 10 has added a final stemming step to its PersianAnalyzer, which Elasticsearch exposes as the 'persian' analyzer. Existing indices will keep the old non-stemming behaviour, while new indices will see the updated behaviour with added stemming. Users that wish to maintain the non-stemming behaviour need to define their own analyzer as outlined in https://www.elastic.co/guide/en/elasticsearch/reference/8.15/analysis-lang-analyzer.html#persian-analyzer. Users that wish to use the new stemming behaviour for existing indices will have to reindex their data. + +*Impact* + +Indexing with the 'persian' analyzer will produce slightly different tokens. Users should check if this impacts their search results. If they wish to maintain the legacy non-stemming behaviour, they can define their own analyzer equivalent as explained in https://www.elastic.co/guide/en/elasticsearch/reference/8.15/analysis-lang-analyzer.html#persian-analyzer and sketched below.
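+ +A minimal sketch of such a custom analyzer, following the component list in the linked 8.15 reference (the index name `persian_example` and the analyzer name `rebuilt_persian` are illustrative): + +[source,console] +------------------------------------------------------------ +PUT /persian_example +{ + "settings": { + "analysis": { + "char_filter": { + "zero_width_spaces": { + "type": "mapping", + "mappings": [ "\\u200C=>\\u0020" ] + } + }, + "filter": { + "persian_stop": { + "type": "stop", + "stopwords": "_persian_" + } + }, + "analyzer": { + "rebuilt_persian": { + "tokenizer": "standard", + "char_filter": [ "zero_width_spaces" ], + "filter": [ "lowercase", "decimal_digit", "arabic_normalization", "persian_normalization", "persian_stop" ] + } + } + } + } +} +------------------------------------------------------------ +// TEST[skip:TBD] + +Note that the filter chain deliberately ends without a stemmer, which is what preserves the pre-Lucene-10 behaviour; verify the exact component list against the linked reference for your version.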
+==== + +[[korean_dictionary_for_nori_has_been_updated]] +.The Korean dictionary for Nori has been updated +[%collapsible] +==== +*Details* + +Lucene 10 ships with an updated Korean dictionary (mecab-ko-dic-2.1.1). For details see https://github.com/apache/lucene/issues/11452. Users experiencing changes in search behaviour on existing data are advised to reindex. + +*Impact* + +The change is small and should generally provide better analysis results. Existing indices for full-text use cases should still be reindexed, though. +==== + +[discrete] +[[breaking_90_cluster_and_node_setting_changes]] +==== Cluster and node setting changes + +[[minimum_shard_balancer_threshold_1_0]] +.Minimum shard balancer threshold is now 1.0 +[%collapsible] +==== +*Details* + +Earlier versions of {es} accepted any non-negative value for `cluster.routing.allocation.balance.threshold`, but values smaller than `1.0` do not make sense and have been ignored since version 8.6.1. From 9.0.0, these values are forbidden. + +*Impact* + +Do not set `cluster.routing.allocation.balance.threshold` to a value less than `1.0`. +==== + +[[remove_cluster_routing_allocation_disk_watermark_enable_for_single_data_node_setting]] +.Remove `cluster.routing.allocation.disk.watermark.enable_for_single_data_node` setting +[%collapsible] +==== +*Details* + +Prior to 7.8, whenever a cluster had only a single data node, the watermarks would not be respected. To change this in 7.8+ in a backwards-compatible way, we introduced the `cluster.routing.allocation.disk.watermark.enable_for_single_data_node` node setting. The setting was deprecated in 7.14 and was made to accept only `true` in 8.0. + +*Impact* + +No known end-user impact. +==== + +[[remove_deprecated_xpack_searchable_snapshot_allocate_on_rolling_restart_setting]] +.Remove deprecated `xpack.searchable.snapshot.allocate_on_rolling_restart` setting +[%collapsible] +==== +*Details* + +The `xpack.searchable.snapshot.allocate_on_rolling_restart` setting was created as an escape hatch in case relying on the `cluster.routing.allocation.enable=primaries` setting for allocating searchable snapshots during rolling restarts had unintended side effects. It has been deprecated since 8.2.0. + +*Impact* + +Remove `xpack.searchable.snapshot.allocate_on_rolling_restart` from your settings if present. +==== + +[[remove_unsupported_legacy_value_for_discovery_type]] +.Remove unsupported legacy value for `discovery.type` +[%collapsible] +==== +*Details* + +Earlier versions of {es} had a `discovery.type` setting which permitted values that referred to legacy discovery types. From v9.0.0 onwards, the only supported values for this setting are `multi-node` (the default) and `single-node`. + +*Impact* + +Remove any value for `discovery.type` from your `elasticsearch.yml` configuration file. +==== + +[discrete] +[[breaking_90_ingest_changes]] +==== Ingest changes + +[[remove_ecs_option_on_user_agent_processor]] +.Remove `ecs` option on `user_agent` processor +[%collapsible] +==== +*Details* + +The `user_agent` ingest processor no longer accepts the `ecs` option. (It was previously deprecated and ignored.) + +*Impact* + +Users should stop using the `ecs` option when creating instances of the `user_agent` ingest processor. The option will be removed from existing processors stored in the cluster state on upgrade.
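+ +For illustration, a minimal sketch of a valid `user_agent` processor without the removed option (the pipeline name and the `agent` source field are illustrative): + +[source,console] +------------------------------------------------------------ +PUT _ingest/pipeline/user_agent +{ + "description": "Parse the user agent string stored in the agent field", + "processors": [ + { + "user_agent": { + "field": "agent" + } + } + ] +} +------------------------------------------------------------ +// TEST[skip:TBD]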
+==== + +[[remove_ignored_fallback_option_on_geoip_processor]] +.Remove ignored fallback option on GeoIP processor +[%collapsible] +==== +*Details* + +The `fallback_to_default_databases` option on the `geoip` ingest processor has been removed. (It had been deprecated and ignored since 8.0.0.) + +*Impact* + +Remove the no-op `fallback_to_default_databases` option from any `geoip` ingest processors. +==== + +[discrete] +[[breaking_90_mapping_changes]] +==== Mapping changes + +[[remove_support_for_type_fields_copy_to_boost_in_metadata_field_definition]] +.Remove support for type, fields, copy_to and boost in metadata field definition +[%collapsible] +==== +*Details* + +The `type`, `fields`, `copy_to`, and `boost` parameters are no longer supported in metadata field definitions. + +*Impact* + +Users providing `type`, `fields`, `copy_to`, or `boost` as part of a metadata field definition should remove them from their mappings. +==== + +[discrete] +[[breaking_90_rest_api_changes]] +==== REST API changes + +[[apply_more_strict_parsing_of_actions_in_bulk_api]] +.Apply more strict parsing of actions in bulk API +[%collapsible] +==== +*Details* + +Previously, the following classes of malformed input were deprecated but not rejected in the action lines of a bulk request: missing closing brace; additional keys after the action (which were ignored); additional data after the closing brace (which was ignored). They will now be considered errors and rejected. + +*Impact* + +Users must provide well-formed input when using the bulk API. (They can request REST API compatibility with v8 to get the previous behaviour back as an interim measure.) +==== + +[[error_json_structure_has_changed_when_detailed_errors_are_disabled]] +.Error JSON structure has changed when detailed errors are disabled +[%collapsible] +==== +*Details* + +This change modifies the JSON format of error messages returned to REST clients when detailed messages are turned off. Previously, when an exception occurred and `http.detailed_errors.enabled: false` was set, the returned JSON consisted of just a single `"error"` text field with some basic information. Setting `http.detailed_errors.enabled: true` (the default) changed this field to an object with more detailed information. With this change, non-detailed errors now have the same structure as detailed errors: `"error"` will always be an object with, at a minimum, a `"type"` and a `"reason"` field. Additional fields are included when detailed errors are enabled. To use the previous structure for non-detailed errors, use the v8 REST API. + +*Impact* + +If you have set `http.detailed_errors.enabled: false` (the default is `true`), the structure of the JSON returned when an exception occurs now matches the structure when detailed errors are enabled. To use the previous structure for non-detailed errors, use the v8 REST API. +==== + +[[remove_cluster_state_from_cluster_reroute_response]] +.Remove cluster state from `/_cluster/reroute` response +[%collapsible] +==== +*Details* + +The `POST /_cluster/reroute` API no longer returns the cluster state in its response. The `?metric` query parameter to this API now has no effect and its use will be forbidden in a future version. + +*Impact* + +Cease usage of the `?metric` query parameter when calling the `POST /_cluster/reroute` API.
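+ +For example, an explicit allocation command is now sent without the `?metric` parameter; a minimal sketch (the index and node names are illustrative): + +[source,console] +------------------------------------------------------------ +POST /_cluster/reroute +{ + "commands": [ + { + "move": { + "index": "my-index", + "shard": 0, + "from_node": "node-1", + "to_node": "node-2" + } + } + ] +} +------------------------------------------------------------ +// TEST[skip:TBD]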
+==== + +[[remove_deprecated_local_attribute_from_alias_apis]] +.Remove deprecated local attribute from alias APIs +[%collapsible] +==== +*Details* + +The following APIs no longer accept the `?local` query parameter: `GET /_alias`, `GET /_aliases`, `GET /_alias/{name}`, `HEAD /_alias/{name}`, `GET /{index}/_alias`, `HEAD /{index}/_alias`, `GET /{index}/_alias/{name}`, `HEAD /{index}/_alias/{name}`, `GET /_cat/aliases`, and `GET /_cat/aliases/{alias}`. This parameter has been deprecated and ignored since version 8.12. + +*Impact* + +Cease usage of the `?local` query parameter when calling the listed APIs. +==== + +[[remove_legacy_params_from_range_query]] +.Remove legacy params from range query +[%collapsible] +==== +*Details* + +The deprecated range query parameters `to`, `from`, `include_lower`, and `include_upper` are no longer supported. + +*Impact* + +Users should use `lt`, `lte`, `gt`, and `gte` query parameters instead. +==== + +[[remove_support_for_deprecated_force_source_highlighting_parameter]] +.Remove support for deprecated `force_source` highlighting parameter +[%collapsible] +==== +*Details* + +The deprecated highlighting `force_source` parameter is no longer supported. + +*Impact* + +Users should remove usages of the `force_source` parameter from their search requests. +==== [discrete] @@ -235,85 +257,45 @@ after upgrading to 9.0. To find out if you are using any deprecated functionality, enable <>. -// -// [discrete] -// [[deprecations_90_analysis]] -// ==== Analysis deprecations -// -// [[deprecate_dutch_kp_lovins_stemmer_as_they_are_removed_in_lucene_10]] -// .Deprecate dutch_kp and lovins stemmer as they are removed in Lucene 10 -// [%collapsible] -// ==== -// *Details* + -// kp, dutch_kp, dutchKp and lovins stemmers are deprecated and will be removed. -// -// *Impact* + -// These stemmers will be removed and will be no longer supported. -// ==== -// -// [[deprecate_edge_ngram_side_parameter]] -// .deprecate `edge_ngram` side parameter -// [%collapsible] -// ==== -// *Details* + -// edge_ngram will no longer accept the side parameter. -// -// *Impact* + -// Users will need to update any usage of edge_ngram token filter that utilizes `side`. If the `back` value was used, they can achieve the same behavior by using the `reverse` token filter. -// ==== -// -// [discrete] -// [[deprecations_90_crud]] -// ==== CRUD deprecations -// -// [[deprecate_dot_prefixed_indices_composable_template_index_patterns]] -// .Deprecate dot-prefixed indices and composable template index patterns -// [%collapsible] -// ==== -// *Details* + -// Indices beginning with a dot '.' are reserved for system and internal indices, and should not be used by and end-user. Additionally, composable index templates that contain patterns for dot-prefixed indices should also be avoided, as these patterns are meant for internal use only. In a future Elasticsearch version, creation of these dot-prefixed indices will no longer be allowed. -// -// *Impact* + -// Requests performing an action that would create an index beginning with a dot (indexing a document, manual creation, reindex), or creating an index template with index patterns beginning with a dot, will contain a deprecation header warning about dot-prefixed indices in the response. 
-// ==== -// -// [discrete] -// [[deprecations_90_rest_api]] -// ==== REST API deprecations -// -// [[adding_deprecation_warnings_for_rrf_using_rank_sub_searches]] -// .Adding deprecation warnings for rrf using rank and `sub_searches` -// [%collapsible] -// ==== -// *Details* + -// Search API parameter `sub_searches` will no longer be a supported and will be removed in future releases. Similarly, `rrf` can only be used through the specified `retriever` and no longer though the `rank` parameter -// -// *Impact* + -// Requests specifying rrf through `rank` and/or `sub_searches` elements will be disallowed in a future version. Users should instead utilize the new `retriever` parameter. -// ==== -// -// [[deprecate_legacy_params_from_range_query]] -// .Deprecate legacy params from range query -// [%collapsible] -// ==== -// *Details* + -// Range query will not longer accept `to`, `from`, `include_lower`, and `include_upper` parameters. -// -// *Impact* + -// Instead use `gt`, `gte`, `lt` and `lte` parameters. -// ==== -// -// [[inference_api_deprecate_elser_service]] -// .[Inference API] Deprecate elser service -// [%collapsible] -// ==== -// *Details* + -// The `elser` service of the inference API will be removed in an upcoming release. Please use the elasticsearch service instead. -// -// *Impact* + -// In the current version there is no impact. In a future version, users of the `elser` service will no longer be able to use it, and will be required to use the `elasticsearch` service to access elser through the inference API. -// ==== - -// BELOW WAS MANUALLY ADDED TO FIX THE BUILD -include::migrate_9_0/transient-settings-migration-guide.asciidoc[] -//include::migrate_9_0/rest-api-changes.asciidoc[] //see ES-9932 + + +[discrete] +[[deprecations_90_mapping]] +==== Mapping deprecations + +[[deprecate_source_mode_in_mappings]] +.Deprecate `_source.mode` in mappings +[%collapsible] +==== +*Details* + +Configuring `_source.mode` in mappings is deprecated and will be removed in future versions. Use the `index.mapping.source.mode` index setting instead. + +*Impact* + +Use the `index.mapping.source.mode` index setting instead. +==== + +[discrete] +[[deprecations_90_rest_api]] +==== REST API deprecations + +[[document_type_deprecated_on_simulate_pipeline_api]] +.Document `_type` deprecated on simulate pipeline API +[%collapsible] +==== +*Details* + +Passing a document with a `_type` property is deprecated in the `/_ingest/pipeline/{id}/_simulate` and `/_ingest/pipeline/_simulate` APIs. + +*Impact* + +Users should already have stopped using mapping types, which were deprecated in {es} 7. This deprecation warning will fire if they specify mapping types on documents passed to the simulate pipeline API. +==== + +[[inference_api_deprecate_elser_service]] +.[Inference API] Deprecate elser service +[%collapsible] +==== +*Details* + +The `elser` service of the inference API will be removed in an upcoming release. Please use the `elasticsearch` service instead. + +*Impact* + +In the current version there is no impact. In a future version, users of the `elser` service will no longer be able to use it, and will be required to use the `elasticsearch` service to access ELSER through the inference API.
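+ +As a sketch of the migration, an endpoint that would previously have used the `elser` service can be created through the `elasticsearch` service instead, mirroring the ELSER example in the `elasticsearch` service documentation (the endpoint name is illustrative): + +[source,console] +------------------------------------------------------------ +PUT _inference/sparse_embedding/my-elser-endpoint +{ + "service": "elasticsearch", + "service_settings": { + "adaptive_allocations": { + "enabled": true, + "min_number_of_allocations": 1, + "max_number_of_allocations": 4 + }, + "num_threads": 1, + "model_id": ".elser_model_2" + } +} +------------------------------------------------------------ +// TEST[skip:TBD]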
+==== + diff --git a/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc b/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc index beff87e6ec6e6..b55f022a5d168 100644 --- a/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc +++ b/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc @@ -235,7 +235,7 @@ The reason for the current state. Usually only populated when the `routing_state (string) The current routing state. -- -* `starting`: The model is attempting to allocate on this model, inference calls are not yet accepted. +* `starting`: The model is attempting to allocate on this node; inference calls are not yet accepted. * `started`: The model is allocated and ready to accept inference requests. * `stopping`: The model is being deallocated from this node. * `stopped`: The model is fully deallocated from this node. diff --git a/docs/reference/modules/discovery/voting.asciidoc b/docs/reference/modules/discovery/voting.asciidoc index 9e483d5883017..04cae9d02ab66 100644 --- a/docs/reference/modules/discovery/voting.asciidoc +++ b/docs/reference/modules/discovery/voting.asciidoc @@ -63,8 +63,7 @@ departed nodes from the voting configuration manually. Use the of resilience. No matter how it is configured, Elasticsearch will not suffer from a -"{wikipedia}/Split-brain_(computing)[split-brain]" inconsistency. -The `cluster.auto_shrink_voting_configuration` +"split-brain" inconsistency. The `cluster.auto_shrink_voting_configuration` setting affects only its availability in the event of the failure of some of its nodes and the administrative tasks that must be performed as nodes join and leave the cluster. diff --git a/docs/reference/modules/http.asciidoc b/docs/reference/modules/http.asciidoc index 984fb0d5bf1c1..17d67e00e2ebb 100644 --- a/docs/reference/modules/http.asciidoc +++ b/docs/reference/modules/http.asciidoc @@ -145,11 +145,9 @@ NOTE: This header is only returned when the setting is set to `true`. `http.detailed_errors.enabled`:: (<>, boolean) -Configures whether detailed error reporting in HTTP responses is enabled. -Defaults to `true`, which means that HTTP requests that include the -<> will return a -detailed error message including a stack trace if they encounter an exception. -If set to `false`, requests with the `?error_trace` parameter are rejected. +Configures whether detailed error reporting in HTTP responses is enabled. Defaults to `true`. +When this option is set to `false`, only basic information is returned if an error occurs in the request, +and requests with <> set are rejected.
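+ +For illustration, with `http.detailed_errors.enabled: false` an error response keeps the standard structure but carries only the basic fields; a representative (not exhaustive) shape, with example values: + +[source,js] +------------------------------------------------------------ +{ + "error": { + "type": "illegal_argument_exception", + "reason": "..." + }, + "status": 400 +} +------------------------------------------------------------ +// NOTCONSOLE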
`http.pipelining.max_events`:: (<>, integer) diff --git a/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc b/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc index fee4b797da724..a024305588cae 100644 --- a/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc +++ b/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc @@ -511,8 +511,9 @@ In this tutorial scenario it's useful for when users have complex requirements f Let's create a query that addresses the following user needs: -* Must be a vegetarian main course +* Must be a vegetarian recipe * Should contain "curry" or "spicy" in the title or description +* Should be a main course * Must not be a dessert * Must have a rating of at least 4.5 * Should prefer recipes published in the last month @@ -524,16 +525,7 @@ GET /cooking_blog/_search "query": { "bool": { "must": [ - { - "term": { - "category.keyword": "Main Course" - } - }, - { - "term": { - "tags": "vegetarian" - } - }, + { "term": { "tags": "vegetarian" } }, { "range": { "rating": { @@ -543,10 +535,18 @@ GET /cooking_blog/_search } ], "should": [ + { + "term": { + "category": "Main Course" + } + }, { "multi_match": { "query": "curry spicy", - "fields": ["title^2", "description"] + "fields": [ + "title^2", + "description" + ] } }, { @@ -590,12 +590,12 @@ GET /cooking_blog/_search "value": 1, "relation": "eq" }, - "max_score": 7.9835095, + "max_score": 7.444513, "hits": [ { "_index": "cooking_blog", "_id": "2", - "_score": 7.9835095, + "_score": 7.444513, "_source": { "title": "Spicy Thai Green Curry: A Vegetarian Adventure", <1> "description": "Dive into the flavors of Thailand with this vibrant green curry. Packed with vegetables and aromatic herbs, this dish is both healthy and satisfying. Don't worry about the heat - you can easily adjust the spice level to your liking.", <2> @@ -619,8 +619,8 @@ GET /cooking_blog/_search <1> The title contains "Spicy" and "Curry", matching our should condition. With the default <> behavior, this field contributes most to the relevance score. <2> While the description also contains matching terms, only the best matching field's score is used by default. <3> The recipe was published within the last month, satisfying our recency preference. -<4> The "Main Course" category matches our `must` condition. -<5> The "vegetarian" tag satisfies another `must` condition, while "curry" and "spicy" tags align with our `should` preferences. +<4> The "Main Course" category satisfies another `should` condition. +<5> The "vegetarian" tag satisfies a `must` condition, while "curry" and "spicy" tags align with our `should` preferences. <6> The rating of 4.6 meets our minimum rating requirement of 4.5. ============== diff --git a/docs/reference/release-notes/8.18.0.asciidoc b/docs/reference/release-notes/8.18.0.asciidoc new file mode 100644 index 0000000000000..332edfbc23eb7 --- /dev/null +++ b/docs/reference/release-notes/8.18.0.asciidoc @@ -0,0 +1,8 @@ +[[release-notes-8.18.0]] +== {es} version 8.18.0 + +coming[8.18.0] + +Also see <>. + + diff --git a/docs/reference/release-notes/9.0.0.asciidoc b/docs/reference/release-notes/9.0.0.asciidoc index af26fd57385e3..93e5a30cb82f7 100644 --- a/docs/reference/release-notes/9.0.0.asciidoc +++ b/docs/reference/release-notes/9.0.0.asciidoc @@ -1,6 +1,3 @@ -// THIS IS A GENERATED FILE. DO NOT EDIT DIRECTLY. -// The content generated here are is not correct and most has been manually commented out until it can be fixed. -// See ES-9931 for more details. 
[[release-notes-9.0.0]] == {es} version 9.0.0 @@ -12,546 +9,289 @@ Also see <>. [float] === Breaking changes -// Allocation:: -// * Remove cluster state from `/_cluster/reroute` response {es-pull}114231[#114231] (issue: {es-issue}88978[#88978]) -// -// Analysis:: -// * Set lenient to true by default when using updateable synonyms {es-pull}110901[#110901] -// * Snowball stemmers have been upgraded {es-pull}114146[#114146] -// * The 'german2' stemmer is now an alias for the 'german' snowball stemmer {es-pull}113614[#113614] -// * The 'persian' analyzer has stemmer by default {es-pull}113482[#113482] (issue: {es-issue}113050[#113050]) -// * The Korean dictionary for Nori has been updated {es-pull}114124[#114124] -// -// Cluster Coordination:: -// * Remove unsupported legacy value for `discovery.type` {es-pull}112903[#112903] -// -// Data streams:: -// * Update data stream lifecycle telemetry to track global retention {es-pull}112451[#112451] -// -// ES|QL:: -// * ESQL: Entirely remove META FUNCTIONS {es-pull}113967[#113967] -// -// Indices APIs:: -// * Remove deprecated local attribute from alias APIs {es-pull}115393[#115393] -// -// Mapping:: -// * JDK locale database change {es-pull}113975[#113975] -// -// Search:: -// * Adding breaking change entry for retrievers {es-pull}115399[#115399] +Allocation:: +* Increase minimum threshold in shard balancer {es-pull}115831[#115831] +* Remove `cluster.routing.allocation.disk.watermark.enable_for_single_data_node` setting {es-pull}114207[#114207] +* Remove cluster state from `/_cluster/reroute` response {es-pull}114231[#114231] (issue: {es-issue}88978[#88978]) + +Analysis:: +* Snowball stemmers have been upgraded {es-pull}114146[#114146] +* The 'german2' stemmer is now an alias for the 'german' snowball stemmer {es-pull}113614[#113614] +* The 'persian' analyzer has stemmer by default {es-pull}113482[#113482] (issue: {es-issue}113050[#113050]) +* The Korean dictionary for Nori has been updated {es-pull}114124[#114124] + +Cluster Coordination:: +* Remove unsupported legacy value for `discovery.type` {es-pull}112903[#112903] + +Highlighting:: +* Remove support for deprecated `force_source` highlighting parameter {es-pull}116943[#116943] + +Indices APIs:: +* Apply more strict parsing of actions in bulk API {es-pull}115923[#115923] +* Remove deprecated local attribute from alias APIs {es-pull}115393[#115393] + +Infra/REST API:: +* Output a consistent format when generating error json {es-pull}90529[#90529] (issue: {es-issue}89387[#89387]) + +Ingest Node:: +* Remove `ecs` option on `user_agent` processor {es-pull}116077[#116077] +* Remove ignored fallback option on GeoIP processor {es-pull}116112[#116112] + +Mapping:: +* Remove support for type, fields, `copy_to` and boost in metadata field definition {es-pull}116944[#116944] + +Search:: +* Remove legacy params from range query {es-pull}116970[#116970] + +Snapshot/Restore:: +* Remove deprecated `xpack.searchable.snapshot.allocate_on_rolling_restart` setting {es-pull}114202[#114202] [[bug-9.0.0]] [float] === Bug fixes -// -// Aggregations:: -// * Always check the parent breaker with zero bytes in `PreallocatedCircuitBreakerService` {es-pull}115181[#115181] -// * Force using the last centroid during merging {es-pull}111644[#111644] (issue: {es-issue}111065[#111065]) -// -// Authentication:: -// * Check for disabling own user in Put User API {es-pull}112262[#112262] (issue: {es-issue}90205[#90205]) -// * Expose cluster-state role mappings in APIs {es-pull}114951[#114951] -// -// Authorization:: -// * Fix DLS 
& FLS sometimes being enforced when it is disabled {es-pull}111915[#111915] (issue: {es-issue}94709[#94709]) -// * Fix DLS using runtime fields and synthetic source {es-pull}112341[#112341] -// -// CRUD:: -// * Don't fail retention lease sync actions due to capacity constraints {es-pull}109414[#109414] (issue: {es-issue}105926[#105926]) -// * Preserve thread context when waiting for segment generation in RTG {es-pull}114623[#114623] -// * Standardize error code when bulk body is invalid {es-pull}114869[#114869] -// -// Cluster Coordination:: -// * Ensure clean thread context in `MasterService` {es-pull}114512[#114512] -// -// Data streams:: -// * Adding support for data streams with a match-all template {es-pull}111311[#111311] (issue: {es-issue}111204[#111204]) -// * Exclude internal data streams from global retention {es-pull}112100[#112100] -// * Fix verbose get data stream API not requiring extra privileges {es-pull}112973[#112973] -// * OTel mappings: avoid metrics to be rejected when attributes are malformed {es-pull}114856[#114856] -// * [otel-data] Add more kubernetes aliases {es-pull}115429[#115429] -// * logs-apm.error-*: define log.level field as keyword {es-pull}112440[#112440] -// -// Distributed:: -// * Handle `InternalSendException` inline for non-forking handlers {es-pull}114375[#114375] -// -// EQL:: -// * Don't use a `BytesStreamOutput` to copy keys in `BytesRefBlockHash` {es-pull}114819[#114819] (issue: {es-issue}114599[#114599]) -// * Fix validation of TEXT fields with case insensitive comparison {es-pull}111238[#111238] (issue: {es-issue}111235[#111235]) -// -// ES|QL:: -// * ESQL: Add Values aggregation tests, fix `ConstantBytesRefBlock` memory handling {es-pull}111367[#111367] -// * ESQL: Align year diffing to the rest of the units in DATE_DIFF: chronological {es-pull}113103[#113103] (issue: {es-issue}112482[#112482]) -// * ESQL: Disable pushdown of WHERE past STATS {es-pull}115308[#115308] (issue: {es-issue}115281[#115281]) -// * ESQL: Fix CASE when conditions are multivalued {es-pull}112401[#112401] (issue: {es-issue}112359[#112359]) -// * ESQL: Fix Double operations returning infinite {es-pull}111064[#111064] (issue: {es-issue}111026[#111026]) -// * ESQL: Fix `REVERSE` with backspace character {es-pull}115245[#115245] (issues: {es-issue}114372[#114372], {es-issue}115227[#115227], {es-issue}115228[#115228]) -// * ESQL: Fix a bug in `MV_PERCENTILE` {es-pull}112218[#112218] (issues: {es-issue}112193[#112193], {es-issue}112180[#112180], {es-issue}112187[#112187], {es-issue}112188[#112188]) -// * ESQL: Fix filtered grouping on ords {es-pull}115312[#115312] (issue: {es-issue}114897[#114897]) -// * ESQL: Fix grammar changes around per agg filtering {es-pull}114848[#114848] -// * ESQL: Fix serialization during `can_match` {es-pull}111779[#111779] (issues: {es-issue}111701[#111701], {es-issue}111726[#111726]) -// * ESQL: Fix synthetic attribute pruning {es-pull}111413[#111413] (issue: {es-issue}105821[#105821]) -// * ESQL: don't lose the original casting error message {es-pull}111968[#111968] (issue: {es-issue}111967[#111967]) -// * ESQL: fix for missing indices error message {es-pull}111797[#111797] (issue: {es-issue}111712[#111712]) -// * ES|QL: Fix stats by constant expression {es-pull}114899[#114899] -// * ES|QL: Restrict sorting for `_source` and counter field types {es-pull}114638[#114638] (issues: {es-issue}114423[#114423], {es-issue}111976[#111976]) -// * ES|QL: better validation for GROK patterns {es-pull}110574[#110574] (issue: {es-issue}110533[#110533]) -// * 
ES|QL: better validation for RLIKE patterns {es-pull}112489[#112489] (issue: {es-issue}112485[#112485]) -// * ES|QL: better validation of GROK patterns {es-pull}112200[#112200] (issue: {es-issue}112111[#112111]) -// * Fix ST_CENTROID_AGG when no records are aggregated {es-pull}114888[#114888] (issue: {es-issue}106025[#106025]) -// * Fix TDigestState.read CB leaks {es-pull}114303[#114303] (issue: {es-issue}114194[#114194]) -// * Spatial search functions support multi-valued fields in compute engine {es-pull}112063[#112063] (issues: {es-issue}112102[#112102], {es-issue}112505[#112505], {es-issue}110830[#110830]) -// * [ES|QL] Check expression resolved before checking its data type in `ImplicitCasting` {es-pull}113314[#113314] (issue: {es-issue}113242[#113242]) -// * [ES|QL] Simplify patterns for subfields {es-pull}111118[#111118] -// * [ES|QL] Simplify syntax of named parameter for identifier and pattern {es-pull}115061[#115061] -// * [ES|QL] Skip validating remote cluster index names in parser {es-pull}114271[#114271] -// * [ES|QL] Use `RangeQuery` and String in `BinaryComparison` on datetime fields {es-pull}110669[#110669] (issue: {es-issue}107900[#107900]) -// * [ES|QL] add tests for stats by constant {es-pull}110593[#110593] (issue: {es-issue}105383[#105383]) -// * [ES|QL] make named parameter for identifier and pattern snapshot {es-pull}114784[#114784] -// * [ES|QL] validate `mv_sort` order {es-pull}110021[#110021] (issue: {es-issue}109910[#109910]) -// -// Geo:: -// * Fix cases of collections with one point {es-pull}111193[#111193] (issue: {es-issue}110982[#110982]) -// -// Health:: -// * Set `replica_unassigned_buffer_time` in constructor {es-pull}112612[#112612] -// -// ILM+SLM:: -// * Make `SnapshotLifecycleStats` immutable so `SnapshotLifecycleMetadata.EMPTY` isn't changed as side-effect {es-pull}111215[#111215] -// -// Indices APIs:: -// * Revert "Add `ResolvedExpression` wrapper" {es-pull}115317[#115317] -// -// Infra/Core:: -// * Fix max file size check to use `getMaxFileSize` {es-pull}113723[#113723] (issue: {es-issue}113705[#113705]) -// * Guard blob store local directory creation with `doPrivileged` {es-pull}115459[#115459] -// * Handle `BigInteger` in xcontent copy {es-pull}111937[#111937] (issue: {es-issue}111812[#111812]) -// * Report JVM stats for all memory pools (97046) {es-pull}115117[#115117] (issue: {es-issue}97046[#97046]) -// * `ByteArrayStreamInput:` Return -1 when there are no more bytes to read {es-pull}112214[#112214] -// -// Infra/Logging:: -// * Only emit product origin in deprecation log if present {es-pull}111683[#111683] (issue: {es-issue}81757[#81757]) -// -// Infra/Metrics:: -// * Make `randomInstantBetween` always return value in range [minInstant, `maxInstant]` {es-pull}114177[#114177] -// -// Infra/REST API:: -// * Fixed a `NullPointerException` in `_capabilities` API when the `path` parameter is null. 
{es-pull}113413[#113413] (issue: {es-issue}113413[#113413]) -// -// Infra/Settings:: -// * GET _cluster/settings with include_defaults returns the expected fallback value if defined in elasticsearch.yml {es-pull}110816[#110816] (issue: {es-issue}110815[#110815]) -// -// Ingest Node:: -// * Add warning headers for ingest pipelines containing special characters {es-pull}114837[#114837] (issue: {es-issue}104411[#104411]) -// * Fix IPinfo geolocation schema {es-pull}115147[#115147] -// * Fix `getDatabaseType` for unusual MMDBs {es-pull}112888[#112888] -// * Reducing error-level stack trace logging for normal events in `GeoIpDownloader` {es-pull}114924[#114924] -// -// License:: -// * Fix Start Trial API output acknowledgement header for features {es-pull}111740[#111740] (issue: {es-issue}111739[#111739]) -// * Fix `TokenService` always appearing used in Feature Usage {es-pull}112263[#112263] (issue: {es-issue}61956[#61956]) -// -// Logs:: -// * Do not expand dots when storing objects in ignored source {es-pull}113910[#113910] -// * Fix `ignore_above` handling in synthetic source when index level setting is used {es-pull}113570[#113570] (issue: {es-issue}113538[#113538]) -// * Fix synthetic source for flattened field when used with `ignore_above` {es-pull}113499[#113499] (issue: {es-issue}112044[#112044]) -// -// Machine Learning:: -// * Avoid `ModelAssignment` deadlock {es-pull}109684[#109684] -// * Fix NPE in Get Deployment Stats {es-pull}115404[#115404] -// * Fix bug in ML serverless autoscaling which prevented trained model updates from triggering a scale up {es-pull}110734[#110734] -// * Ignore unrecognized openai sse fields {es-pull}114715[#114715] -// * Mitigate IOSession timeouts {es-pull}115414[#115414] (issues: {es-issue}114385[#114385], {es-issue}114327[#114327], {es-issue}114105[#114105], {es-issue}114232[#114232]) -// * Prevent NPE if model assignment is removed while waiting to start {es-pull}115430[#115430] -// * Send mid-stream errors to users {es-pull}114549[#114549] -// * Temporarily return both `modelId` and `inferenceId` for GET /_inference until we migrate clients to only `inferenceId` {es-pull}111490[#111490] -// * Warn for model load failures if they have a status code <500 {es-pull}113280[#113280] -// * [Inference API] Remove unused Cohere rerank service settings fields in a BWC way {es-pull}110427[#110427] -// * [ML] Create Inference API will no longer return model_id and now only return inference_id {es-pull}112508[#112508] -// -// Mapping:: -// * Fix `MapperBuilderContext#isDataStream` when used in dynamic mappers {es-pull}110554[#110554] -// * Fix synthetic source field names for multi-fields {es-pull}112850[#112850] -// * Retrieve the source for objects and arrays in a separate parsing phase {es-pull}113027[#113027] (issue: {es-issue}112374[#112374]) -// * Two empty mappings now are created equally {es-pull}107936[#107936] (issue: {es-issue}107031[#107031]) -// -// Ranking:: -// * Fix MLTQuery handling of custom term frequencies {es-pull}110846[#110846] -// * Fix RRF validation for `rank_constant` < 1 {es-pull}112058[#112058] -// * Fix score count validation in reranker response {es-pull}111212[#111212] (issue: {es-issue}111202[#111202]) -// -// Search:: -// * Allow for querries on `_tier` to skip shards in the `can_match` phase {es-pull}114990[#114990] (issue: {es-issue}114910[#114910]) -// * Allow out of range term queries for numeric types {es-pull}112916[#112916] -// * Do not exclude empty arrays or empty objects in source filtering {es-pull}112250[#112250] 
(issue: {es-issue}109668[#109668]) -// * Fix synthetic source handling for `bit` type in `dense_vector` field {es-pull}114407[#114407] (issue: {es-issue}114402[#114402]) -// * Improve DateTime error handling and add some bad date tests {es-pull}112723[#112723] (issue: {es-issue}112190[#112190]) -// * Improve date expression/remote handling in index names {es-pull}112405[#112405] (issue: {es-issue}112243[#112243]) -// * Make "too many clauses" throw IllegalArgumentException to avoid 500s {es-pull}112678[#112678] (issue: {es-issue}112177[#112177]) -// * Make empty string searches be consistent with case (in)sensitivity {es-pull}110833[#110833] -// * Prevent flattening of ordered and unordered interval sources {es-pull}114234[#114234] -// * Remove needless forking to GENERIC in `TransportMultiSearchAction` {es-pull}110796[#110796] -// * Search/Mapping: KnnVectorQueryBuilder support for allowUnmappedFields {es-pull}107047[#107047] (issue: {es-issue}106846[#106846]) -// * Span term query to convert to match no docs when unmapped field is targeted {es-pull}113251[#113251] -// * Speedup `CanMatchPreFilterSearchPhase` constructor {es-pull}110860[#110860] -// * Updated Date Range to Follow Documentation When Assuming Missing Values {es-pull}112258[#112258] (issue: {es-issue}111484[#111484]) -// -// Security:: -// * Updated the transport CA name in Security Auto-Configuration. {es-pull}106520[#106520] (issue: {es-issue}106455[#106455]) -// -// Snapshot/Restore:: -// * Retry throttled snapshot deletions {es-pull}113237[#113237] -// -// TSDB:: -// * Implement `parseBytesRef` for `TimeSeriesRoutingHashFieldType` {es-pull}113373[#113373] (issue: {es-issue}112399[#112399]) -// -// Task Management:: -// * Improve handling of failure to create persistent task {es-pull}114386[#114386] -// -// Transform:: -// * Allow task canceling of validate API calls {es-pull}110951[#110951] -// * Include reason when no nodes are found {es-pull}112409[#112409] (issue: {es-issue}112404[#112404]) -// -// Vector Search:: -// * Fix dim validation for bit `element_type` {es-pull}114533[#114533] -// * Support semantic_text in object fields {es-pull}114601[#114601] (issue: {es-issue}114401[#114401]) -// -// Watcher:: -// * Truncating watcher history if it is too large {es-pull}111245[#111245] (issue: {es-issue}94745[#94745]) -// * Watch Next Run Interval Resets On Shard Move or Node Restart {es-pull}115102[#115102] (issue: {es-issue}111433[#111433]) -// -// [[deprecation-9.0.0]] -// [float] -// === Deprecations -// -// Analysis:: -// * Deprecate dutch_kp and lovins stemmer as they are removed in Lucene 10 {es-pull}113143[#113143] -// * deprecate `edge_ngram` side parameter {es-pull}110829[#110829] -// -// CRUD:: -// * Deprecate dot-prefixed indices and composable template index patterns {es-pull}112571[#112571] -// -// Machine Learning:: -// * [Inference API] Deprecate elser service {es-pull}113216[#113216] -// -// Search:: -// * Adding deprecation warnings for rrf using rank and `sub_searches` {es-pull}114854[#114854] -// * Deprecate legacy params from range query {es-pull}113286[#113286] -// -// [[enhancement-9.0.0]] -// [float] -// === Enhancements -// -// Aggregations:: -// * Account for `DelayedBucket` before reduction {es-pull}113013[#113013] -// * Add protection for OOM during aggregations partial reduction {es-pull}110520[#110520] -// * Deduplicate `BucketOrder` when deserializing {es-pull}112707[#112707] -// * Lower the memory footprint when creating `DelayedBucket` {es-pull}112519[#112519] -// * Reduce heap usage for 
`AggregatorsReducer` {es-pull}112874[#112874] -// * Remove reduce and `reduceContext` from `DelayedBucket` {es-pull}112547[#112547] -// -// Allocation:: -// * Add link to flood-stage watermark exception message {es-pull}111315[#111315] -// * Always allow rebalancing by default {es-pull}111015[#111015] -// * Only publish desired balance gauges on master {es-pull}115383[#115383] -// -// Application:: -// * [Profiling] add `container.id` field to event index template {es-pull}111969[#111969] -// -// Authorization:: -// * Add manage roles privilege {es-pull}110633[#110633] -// * Add privileges required for CDR misconfiguration features to work on AWS SecurityHub integration {es-pull}112574[#112574] -// * [Security Solution] Add `create_index` to `kibana_system` role for index/DS `.logs-endpoint.action.responses-*` {es-pull}115241[#115241] -// -// CRUD:: -// * Suppress merge-on-recovery for older indices {es-pull}113462[#113462] -// -// Codec:: -// * Remove zstd feature flag for index codec best compression {es-pull}112665[#112665] -// -// Data streams:: -// * Add 'verbose' flag retrieving `maximum_timestamp` for get data stream API {es-pull}112303[#112303] -// * Display effective retention in the relevant data stream APIs {es-pull}112019[#112019] -// * Expose global retention settings via data stream lifecycle API {es-pull}112210[#112210] -// * Make ecs@mappings work with OTel attributes {es-pull}111600[#111600] -// -// Distributed:: -// * Add link to Max Shards Per Node exception message {es-pull}110993[#110993] -// * Use Azure blob batch API to delete blobs in batches {es-pull}114566[#114566] -// -// EQL:: -// * ESQL: Delay construction of warnings {es-pull}114368[#114368] -// -// ES|QL:: -// * Add EXP ES|QL function {es-pull}110879[#110879] -// * Add `CircuitBreaker` to TDigest, Step 3: Connect with ESQL CB {es-pull}113387[#113387] -// * Add `CircuitBreaker` to TDigest, Step 4: Take into account shallow classes size {es-pull}113613[#113613] (issue: {es-issue}113916[#113916]) -// * Collect and display execution metadata for ES|QL cross cluster searches {es-pull}112595[#112595] (issue: {es-issue}112402[#112402]) -// * ESQL: Add support for multivalue fields in Arrow output {es-pull}114774[#114774] -// * ESQL: BUCKET: allow numerical spans as whole numbers {es-pull}111874[#111874] (issues: {es-issue}104646[#104646], {es-issue}109340[#109340], {es-issue}105375[#105375]) -// * ESQL: Have BUCKET generate friendlier intervals {es-pull}111879[#111879] (issue: {es-issue}110916[#110916]) -// * ESQL: Profile more timing information {es-pull}111855[#111855] -// * ESQL: Push down filters even in case of renames in Evals {es-pull}114411[#114411] -// * ESQL: Remove parent from `FieldAttribute` {es-pull}112881[#112881] -// * ESQL: Speed up CASE for some parameters {es-pull}112295[#112295] -// * ESQL: Speed up grouping by bytes {es-pull}114021[#114021] -// * ESQL: Support INLINESTATS grouped on expressions {es-pull}111690[#111690] -// * ESQL: Use less memory in listener {es-pull}114358[#114358] -// * ES|QL: Add support for cached strings in plan serialization {es-pull}112929[#112929] -// * ES|QL: add Telemetry API and track top functions {es-pull}111226[#111226] -// * ES|QL: add metrics for functions {es-pull}114620[#114620] -// * Enhance SORT push-down to Lucene to cover references to fields and ST_DISTANCE function {es-pull}112938[#112938] (issue: {es-issue}109973[#109973]) -// * Siem ea 9521 improve test {es-pull}111552[#111552] -// * Support multi-valued fields in compute engine for ST_DISTANCE 
{es-pull}114836[#114836] (issue: {es-issue}112910[#112910]) -// * [ESQL] Add `SPACE` function {es-pull}112350[#112350] -// * [ESQL] Add finish() elapsed time to aggregation profiling times {es-pull}113172[#113172] (issue: {es-issue}112950[#112950]) -// * [ESQL] Make query wrapped by `SingleValueQuery` cacheable {es-pull}110116[#110116] -// * [ES|QL] Add hypot function {es-pull}114382[#114382] -// * [ES|QL] Cast mixed numeric types to a common numeric type for Coalesce and In at Analyzer {es-pull}111917[#111917] (issue: {es-issue}111486[#111486]) -// * [ES|QL] Combine Disjunctive CIDRMatch {es-pull}111501[#111501] (issue: {es-issue}105143[#105143]) -// * [ES|QL] Create `Range` in `PushFiltersToSource` for qualified pushable filters on the same field {es-pull}111437[#111437] -// * [ES|QL] Name parameter with leading underscore {es-pull}111950[#111950] (issue: {es-issue}111821[#111821]) -// * [ES|QL] Named parameter for field names and field name patterns {es-pull}112905[#112905] -// * [ES|QL] Validate index name in parser {es-pull}112081[#112081] -// * [ES|QL] add reverse function {es-pull}113297[#113297] -// * [ES|QL] explicit cast a string literal to `date_period` and `time_duration` in arithmetic operations {es-pull}109193[#109193] -// -// Experiences:: -// * Integrate IBM watsonx to Inference API for text embeddings {es-pull}111770[#111770] -// -// Geo:: -// * Add support for spatial relationships in point field mapper {es-pull}112126[#112126] -// * Small performance improvement in h3 library {es-pull}113385[#113385] -// * Support docvalues only query in shape field {es-pull}112199[#112199] -// -// Health:: -// * (API) Cluster Health report `unassigned_primary_shards` {es-pull}112024[#112024] -// * Do not treat replica as unassigned if primary recently created and unassigned time is below a threshold {es-pull}112066[#112066] -// * Increase `replica_unassigned_buffer_time` default from 3s to 5s {es-pull}112834[#112834] -// -// ILM+SLM:: -// * ILM: Add `total_shards_per_node` setting to searchable snapshot {es-pull}112972[#112972] (issue: {es-issue}112261[#112261]) -// * PUT slm policy should only increase version if actually changed {es-pull}111079[#111079] -// * Preserve Step Info Across ILM Auto Retries {es-pull}113187[#113187] -// * Register SLM run before snapshotting to save stats {es-pull}110216[#110216] -// * SLM interval schedule followup - add back `getFieldName` style getters {es-pull}112123[#112123] -// -// Infra/Circuit Breakers:: -// * Add link to Circuit Breaker "Data too large" exception message {es-pull}113561[#113561] -// -// Infra/Core:: -// * Add nanos support to `ZonedDateTime` serialization {es-pull}111689[#111689] (issue: {es-issue}68292[#68292]) -// * Extend logging for dropped warning headers {es-pull}111624[#111624] (issue: {es-issue}90527[#90527]) -// * Give the kibana system user permission to read security entities {es-pull}114363[#114363] -// -// Infra/Metrics:: -// * Add `TaskManager` to `pluginServices` {es-pull}112687[#112687] -// * Add `ensureGreen` test method for use with `adminClient` {es-pull}113425[#113425] -// -// Infra/REST API:: -// * Optimize the loop processing of URL decoding {es-pull}110237[#110237] (issue: {es-issue}110235[#110235]) -// -// Infra/Scripting:: -// * Add a `mustache.max_output_size_bytes` setting to limit the length of results from mustache scripts {es-pull}114002[#114002] -// * Expose `HexFormat` in Painless {es-pull}112412[#112412] -// -// Infra/Settings:: -// * Improve exception message for bad environment variable 
placeholders in settings {es-pull}114552[#114552] (issue: {es-issue}110858[#110858]) -// * Reprocess operator file settings when settings service starts, due to node restart or master node change {es-pull}114295[#114295] -// -// Ingest Node:: -// * Add `size_in_bytes` to enrich cache stats {es-pull}110578[#110578] -// * Add support for templates when validating mappings in the simulate ingest API {es-pull}111161[#111161] -// * Adding `index_template_substitutions` to the simulate ingest API {es-pull}114128[#114128] -// * Adding component template substitutions to the simulate ingest API {es-pull}113276[#113276] -// * Adding mapping validation to the simulate ingest API {es-pull}110606[#110606] -// * Adding support for additional mapping to simulate ingest API {es-pull}114742[#114742] -// * Adding support for simulate ingest mapping adddition for indices with mappings that do not come from templates {es-pull}115359[#115359] -// * Adds example plugin for custom ingest processor {es-pull}112282[#112282] (issue: {es-issue}111539[#111539]) -// * Fix unnecessary mustache template evaluation {es-pull}110986[#110986] (issue: {es-issue}110191[#110191]) -// * Listing all available databases in the _ingest/geoip/database API {es-pull}113498[#113498] -// * Make enrich cache based on memory usage {es-pull}111412[#111412] (issue: {es-issue}106081[#106081]) -// * Tag redacted document in ingest metadata {es-pull}113552[#113552] -// * Verify Maxmind database types in the geoip processor {es-pull}114527[#114527] -// -// Logs:: -// * Add validation for synthetic source mode in logs mode indices {es-pull}110677[#110677] -// * Store original source for keywords using a normalizer {es-pull}112151[#112151] -// -// Machine Learning:: -// * Add Completion Inference API for Alibaba Cloud AI Search Model {es-pull}112512[#112512] -// * Add DeBERTa-V2/V3 tokenizer {es-pull}111852[#111852] -// * Add Streaming Inference spec {es-pull}113812[#113812] -// * Add chunking settings configuration to `CohereService,` `AmazonBedrockService,` and `AzureOpenAiService` {es-pull}113897[#113897] -// * Add chunking settings configuration to `ElasticsearchService/ELSER` {es-pull}114429[#114429] -// * Add custom rule parameters to force time shift {es-pull}110974[#110974] -// * Adding chunking settings to `GoogleVertexAiService,` `AzureAiStudioService,` and `AlibabaCloudSearchService` {es-pull}113981[#113981] -// * Adding chunking settings to `MistralService,` `GoogleAiStudioService,` and `HuggingFaceService` {es-pull}113623[#113623] -// * Adds a new Inference API for streaming responses back to the user. 
{es-pull}113158[#113158] -// * Create `StreamingHttpResultPublisher` {es-pull}112026[#112026] -// * Create an ml node inference endpoint referencing an existing model {es-pull}114750[#114750] -// * Default inference endpoint for ELSER {es-pull}113873[#113873] -// * Default inference endpoint for the multilingual-e5-small model {es-pull}114683[#114683] -// * Enable OpenAI Streaming {es-pull}113911[#113911] -// * Filter empty task settings objects from the API response {es-pull}114389[#114389] -// * Increase default `queue_capacity` to 10_000 and decrease max `queue_capacity` to 100_000 {es-pull}115041[#115041] -// * Migrate Inference to `ChunkedToXContent` {es-pull}111655[#111655] -// * Register Task while Streaming {es-pull}112369[#112369] -// * Server-Sent Events for Inference response {es-pull}112565[#112565] -// * Stream Anthropic Completion {es-pull}114321[#114321] -// * Stream Azure Completion {es-pull}114464[#114464] -// * Stream Bedrock Completion {es-pull}114732[#114732] -// * Stream Cohere Completion {es-pull}114080[#114080] -// * Stream Google Completion {es-pull}114596[#114596] -// * Stream OpenAI Completion {es-pull}112677[#112677] -// * Support sparse embedding models in the elasticsearch inference service {es-pull}112270[#112270] -// * Switch default chunking strategy to sentence {es-pull}114453[#114453] -// * Upgrade to AWS SDK v2 {es-pull}114309[#114309] (issue: {es-issue}110590[#110590]) -// * Use the same chunking configurations for models in the Elasticsearch service {es-pull}111336[#111336] -// * Validate streaming HTTP Response {es-pull}112481[#112481] -// * Wait for allocation on scale up {es-pull}114719[#114719] -// * [Inference API] Add Alibaba Cloud AI Search Model support to Inference API {es-pull}111181[#111181] -// * [Inference API] Add Docs for AlibabaCloud AI Search Support for the Inference API {es-pull}111181[#111181] -// * [Inference API] Introduce Update API to change some aspects of existing inference endpoints {es-pull}114457[#114457] -// * [Inference API] Prevent inference endpoints from being deleted if they are referenced by semantic text {es-pull}110399[#110399] -// * [Inference API] alibabacloud ai search service support chunk infer to support semantic_text field {es-pull}110399[#110399] -// -// Mapping:: -// * Add Field caps support for Semantic Text {es-pull}111809[#111809] -// * Add Lucene segment-level fields stats {es-pull}111123[#111123] -// * Add Search Inference ID To Semantic Text Mapping {es-pull}113051[#113051] -// * Add object param for keeping synthetic source {es-pull}113690[#113690] -// * Add support for multi-value dimensions {es-pull}112645[#112645] (issue: {es-issue}110387[#110387]) -// * Allow dimension fields to have multiple values in standard and logsdb index mode {es-pull}112345[#112345] (issues: {es-issue}112232[#112232], {es-issue}112239[#112239]) -// * Allow fields with dots in sparse vector field mapper {es-pull}111981[#111981] (issue: {es-issue}109118[#109118]) -// * Allow querying `index_mode` {es-pull}110676[#110676] -// * Configure keeping source in `FieldMapper` {es-pull}112706[#112706] -// * Control storing array source with index setting {es-pull}112397[#112397] -// * Introduce mode `subobjects=auto` for objects {es-pull}110524[#110524] -// * Update `semantic_text` field to support indexing numeric and boolean data types {es-pull}111284[#111284] -// * Use ELSER By Default For Semantic Text {es-pull}113563[#113563] -// * Use fallback synthetic source for `copy_to` and doc_values: false cases {es-pull}112294[#112294] 
(issues: {es-issue}110753[#110753], {es-issue}110038[#110038], {es-issue}109546[#109546]) -// -// Network:: -// * Add links to network disconnect troubleshooting {es-pull}112330[#112330] -// -// Ranking:: -// * Add timeout and cancellation check to rescore phase {es-pull}115048[#115048] -// -// Recovery:: -// * Trigger merges after recovery {es-pull}113102[#113102] -// -// Relevance:: -// * Add a query rules tester API call {es-pull}114168[#114168] -// -// Search:: -// * Add initial support for `semantic_text` field type {es-pull}113920[#113920] -// * Add more `dense_vector` details for cluster stats field stats {es-pull}113607[#113607] -// * Add range and regexp Intervals {es-pull}111465[#111465] -// * Adding support for `allow_partial_search_results` in PIT {es-pull}111516[#111516] -// * Allow incubating Panama Vector in simdvec, and add vectorized `ipByteBin` {es-pull}112933[#112933] -// * Avoid using concurrent collector manager in `LuceneChangesSnapshot` {es-pull}113816[#113816] -// * Bool query early termination should also consider `must_not` clauses {es-pull}115031[#115031] -// * Deduplicate Kuromoji User Dictionary {es-pull}112768[#112768] -// * Multi term intervals: increase max_expansions {es-pull}112826[#112826] (issue: {es-issue}110491[#110491]) -// * Search coordinator uses `event.ingested` in cluster state to do rewrites {es-pull}111523[#111523] -// * Update cluster stats for retrievers {es-pull}114109[#114109] -// -// Security:: -// * (logger) change from error to warn for short circuiting user {es-pull}112895[#112895] -// * Add asset criticality indices for `kibana_system_user` {es-pull}113588[#113588] -// * Add tier preference to security index settings allowlist {es-pull}111818[#111818] -// * [Service Account] Add `AutoOps` account {es-pull}111316[#111316] -// -// Snapshot/Restore:: -// * Add `max_multipart_parts` setting to S3 repository {es-pull}113989[#113989] -// * Add support for Azure Managed Identity {es-pull}111344[#111344] -// * Add telemetry for repository usage {es-pull}112133[#112133] -// * Add workaround for missing shard gen blob {es-pull}112337[#112337] -// * Clean up dangling S3 multipart uploads {es-pull}111955[#111955] (issues: {es-issue}101169[#101169], {es-issue}44971[#44971]) -// * Execute shard snapshot tasks in shard-id order {es-pull}111576[#111576] (issue: {es-issue}108739[#108739]) -// * Include account name in Azure settings exceptions {es-pull}111274[#111274] -// * Introduce repository integrity verification API {es-pull}112348[#112348] (issue: {es-issue}52622[#52622]) -// * Retry `S3BlobContainer#getRegister` on all exceptions {es-pull}114813[#114813] -// * Track shard snapshot progress during node shutdown {es-pull}112567[#112567] -// -// Stats:: -// * Track search and fetch failure stats {es-pull}113988[#113988] -// -// TSDB:: -// * Add support for boolean dimensions {es-pull}111457[#111457] (issue: {es-issue}111338[#111338]) -// * Stop iterating over all fields to extract @timestamp value {es-pull}110603[#110603] (issue: {es-issue}92297[#92297]) -// * Support booleans in routing path {es-pull}111445[#111445] -// -// Vector Search:: -// * Dense vector field types updatable for int4 {es-pull}110928[#110928] -// * Use native scalar scorer for int8_flat index {es-pull}111071[#111071] -// -// [[feature-9.0.0]] -// [float] -// === New features -// -// Data streams:: -// * Introduce global retention in data stream lifecycle. 
{es-pull}111972[#111972] -// * X-pack/plugin/otel: introduce x-pack-otel plugin {es-pull}111091[#111091] -// -// ES|QL:: -// * Add ESQL match function {es-pull}113374[#113374] -// * ESQL: Add `MV_PSERIES_WEIGHTED_SUM` for score calculations used by security solution {es-pull}109017[#109017] -// * ESQL: Add async ID and `is_running` headers to ESQL async query {es-pull}111840[#111840] -// * ESQL: Add boolean support to Max and Min aggs {es-pull}110527[#110527] -// * ESQL: Add boolean support to TOP aggregation {es-pull}110718[#110718] -// * ESQL: Added `mv_percentile` function {es-pull}111749[#111749] (issue: {es-issue}111591[#111591]) -// * ESQL: INLINESTATS {es-pull}109583[#109583] (issue: {es-issue}107589[#107589]) -// * ESQL: Introduce per agg filter {es-pull}113735[#113735] -// * ESQL: Strings support for MAX and MIN aggregations {es-pull}111544[#111544] -// * ESQL: Support IP fields in MAX and MIN aggregations {es-pull}110921[#110921] -// * ESQL: TOP aggregation IP support {es-pull}111105[#111105] -// * ESQL: TOP support for strings {es-pull}113183[#113183] (issue: {es-issue}109849[#109849]) -// * ESQL: `mv_median_absolute_deviation` function {es-pull}112055[#112055] (issue: {es-issue}111590[#111590]) -// * Remove snapshot build restriction for match and qstr functions {es-pull}114482[#114482] -// * Search in ES|QL: Add MATCH operator {es-pull}110971[#110971] -// -// ILM+SLM:: -// * SLM Interval based scheduling {es-pull}110847[#110847] -// -// Inference:: -// * EIS integration {es-pull}111154[#111154] -// -// Ingest Node:: -// * Add a `terminate` ingest processor {es-pull}114157[#114157] (issue: {es-issue}110218[#110218]) -// -// Machine Learning:: -// * Inference autoscaling {es-pull}109667[#109667] -// * Telemetry for inference adaptive allocations {es-pull}110630[#110630] -// -// Relevance:: -// * [Query rules] Add `exclude` query rule type {es-pull}111420[#111420] -// -// Search:: -// * Async search: Add ID and "is running" http headers {es-pull}112431[#112431] (issue: {es-issue}109576[#109576]) -// * Cross-cluster search telemetry {es-pull}113825[#113825] -// -// Vector Search:: -// * Adding new bbq index types behind a feature flag {es-pull}114439[#114439] + +Aggregations:: +* Handle with `illegalArgumentExceptions` negative values in HDR percentile aggregations {es-pull}116174[#116174] (issue: {es-issue}115777[#115777]) + +Analysis:: +* Adjust analyze limit exception to be a `bad_request` {es-pull}116325[#116325] + +CCS:: +* Fix long metric deserialize & add - auto-resize needs to be set manually {es-pull}117105[#117105] (issue: {es-issue}116914[#116914]) + +CRUD:: +* Preserve thread context when waiting for segment generation in RTG {es-pull}114623[#114623] +* Standardize error code when bulk body is invalid {es-pull}114869[#114869] + +Data streams:: +* Add missing header in `put_data_lifecycle` rest-api-spec {es-pull}116292[#116292] + +EQL:: +* Don't use a `BytesStreamOutput` to copy keys in `BytesRefBlockHash` {es-pull}114819[#114819] (issue: {es-issue}114599[#114599]) + +ES|QL:: +* Added stricter range type checks and runtime warnings for ENRICH {es-pull}115091[#115091] (issues: {es-issue}107357[#107357], {es-issue}116799[#116799]) +* Don't return TEXT type for functions that take TEXT {es-pull}114334[#114334] (issues: {es-issue}111537[#111537], {es-issue}114333[#114333]) +* ESQL: Fix sorts containing `_source` {es-pull}116980[#116980] (issue: {es-issue}116659[#116659]) +* ESQL: fix the column position in errors {es-pull}117153[#117153] +* ES|QL: Fix stats by constant 
expression {es-pull}114899[#114899] +* Fix NPE in `EnrichLookupService` on mixed clusters with <8.14 versions {es-pull}116583[#116583] (issues: {es-issue}116529[#116529], {es-issue}116544[#116544]) +* Fix TDigestState.read CB leaks {es-pull}114303[#114303] (issue: {es-issue}114194[#114194]) +* Fixing remote ENRICH by pushing the Enrich inside `FragmentExec` {es-pull}114665[#114665] (issue: {es-issue}105095[#105095]) +* Use `SearchStats` instead of field.isAggregatable in data node planning {es-pull}115744[#115744] (issue: {es-issue}115737[#115737]) +* [ESQL] Fix Binary Comparisons on Date Nanos {es-pull}116346[#116346] +* [ES|QL] To_DatePeriod and To_TimeDuration return better error messages on `union_type` fields {es-pull}114934[#114934] + +Infra/CLI:: +* Fix NPE on plugin sync {es-pull}115640[#115640] (issue: {es-issue}114818[#114818]) + +Infra/Metrics:: +* Make `randomInstantBetween` always return a value in the range `[minInstant, maxInstant]` {es-pull}114177[#114177] + +Infra/REST API:: +* Fixed a `NullPointerException` in `_capabilities` API when the `path` parameter is null. {es-pull}113413[#113413] (issue: {es-issue}113413[#113413]) + +Infra/Settings:: +* Don't allow secure settings in YML config (109115) {es-pull}115779[#115779] (issue: {es-issue}109115[#109115]) + +Ingest Node:: +* Add warning headers for ingest pipelines containing special characters {es-pull}114837[#114837] (issue: {es-issue}104411[#104411]) +* Reducing error-level stack trace logging for normal events in `GeoIpDownloader` {es-pull}114924[#114924] + +Logs:: +* Always check if index mode is logsdb {es-pull}116922[#116922] +* Prohibit changes to index mode, source, and sort settings during resize {es-pull}115812[#115812] + +Machine Learning:: +* Fix bug in ML autoscaling when some node info is unavailable {es-pull}116650[#116650] +* Fix deberta tokenizer bug caused by bug in normalizer {es-pull}117189[#117189] +* Hides `hugging_face_elser` service from the `GET _inference/_services` API {es-pull}116664[#116664] (issue: {es-issue}116644[#116644]) +* Mitigate IOSession timeouts {es-pull}115414[#115414] (issues: {es-issue}114385[#114385], {es-issue}114327[#114327], {es-issue}114105[#114105], {es-issue}114232[#114232]) +* Propagate scoring function through random sampler {es-pull}116957[#116957] (issue: {es-issue}110134[#110134]) +* Update Deberta tokenizer {es-pull}116358[#116358] +* Wait for up to 2 seconds for yellow status before starting search {es-pull}115938[#115938] (issues: {es-issue}107777[#107777], {es-issue}105955[#105955], {es-issue}107815[#107815], {es-issue}112191[#112191]) + +Mapping:: +* Change synthetic source logic for `constant_keyword` {es-pull}117182[#117182] (issue: {es-issue}117083[#117083]) +* Ignore conflicting fields during dynamic mapping update {es-pull}114227[#114227] (issue: {es-issue}114228[#114228]) + +Network:: +* Use underlying `ByteBuf` `refCount` for `ReleasableBytesReference` {es-pull}116211[#116211] + +Ranking:: +* Propagating nested `inner_hits` to the parent compound retriever {es-pull}116408[#116408] (issue: {es-issue}116397[#116397]) + +Relevance:: +* Fix handling of bulk requests with semantic text fields and delete ops {es-pull}116942[#116942] + +Search:: +* Catch and handle disconnect exceptions in search {es-pull}115836[#115836] +* Fields caps does not honour ignore_unavailable {es-pull}116021[#116021] (issue: {es-issue}107767[#107767]) +* Fix handling of time exceeded exception in fetch phase {es-pull}116676[#116676] +* Fix leak in `DfsQueryPhase` and introduce search
disconnect stress test {es-pull}116060[#116060] (issue: {es-issue}115056[#115056]) +* Inconsistency in the _analyzer api when the index is not included {es-pull}115930[#115930] +* Semantic text simple partial update {es-pull}116478[#116478] +* Updated Date Range to Follow Documentation When Assuming Missing Values {es-pull}112258[#112258] (issue: {es-issue}111484[#111484]) +* Validate missing shards after the coordinator rewrite {es-pull}116382[#116382] +* _validate does not honour ignore_unavailable {es-pull}116656[#116656] (issue: {es-issue}116594[#116594]) + +Snapshot/Restore:: +* Retry throttled snapshot deletions {es-pull}113237[#113237] + +Vector Search:: +* Update Semantic Query To Handle Zero Size Responses {es-pull}116277[#116277] (issue: {es-issue}116083[#116083]) + +Watcher:: +* Watch Next Run Interval Resets On Shard Move or Node Restart {es-pull}115102[#115102] (issue: {es-issue}111433[#111433]) + +[[deprecation-9.0.0]] +[float] +=== Deprecations + +Ingest Node:: +* Fix `_type` deprecation on simulate pipeline API {es-pull}116259[#116259] + +Machine Learning:: +* [Inference API] Deprecate elser service {es-pull}113216[#113216] + +Mapping:: +* Deprecate `_source.mode` in mappings {es-pull}116689[#116689] + +[[enhancement-9.0.0]] +[float] +=== Enhancements + +Allocation:: +* Only publish desired balance gauges on master {es-pull}115383[#115383] + +Authorization:: +* Add a `monitor_stats` privilege and allow that privilege for remote cluster privileges {es-pull}114964[#114964] +* [Security Solution] Add `create_index` to `kibana_system` role for index/DS `.logs-endpoint.action.responses-*` {es-pull}115241[#115241] + +CRUD:: +* Suppress merge-on-recovery for older indices {es-pull}113462[#113462] + +Data streams:: +* Adding a deprecation info API warning for data streams with old indices {es-pull}116447[#116447] +* Apm-data: disable date_detection for all apm data streams {es-pull}116995[#116995] + +Distributed:: +* Metrics for incremental bulk splits {es-pull}116765[#116765] +* Use Azure blob batch API to delete blobs in batches {es-pull}114566[#114566] + +ES|QL:: +* Add ES|QL `bit_length` function {es-pull}115792[#115792] +* ESQL: Honor skip_unavailable setting for nonmatching indices errors at planning time {es-pull}116348[#116348] (issue: {es-issue}114531[#114531]) +* ESQL: Remove parent from `FieldAttribute` {es-pull}112881[#112881] +* ESQL: extract common filter from aggs {es-pull}115678[#115678] +* ESQL: optimise aggregations filtered by false/null into evals {es-pull}115858[#115858] +* ES|QL CCS uses `skip_unavailable` setting for handling disconnected remote clusters {es-pull}115266[#115266] (issue: {es-issue}114531[#114531]) +* ES|QL: add metrics for functions {es-pull}114620[#114620] +* Esql Enable Date Nanos (tech preview) {es-pull}117080[#117080] +* Support partial sort fields in TopN pushdown {es-pull}116043[#116043] (issue: {es-issue}114515[#114515]) +* [ES|QL] Implicit casting string literal to intervals {es-pull}115814[#115814] (issue: {es-issue}115352[#115352]) + +Health:: +* Increase `replica_unassigned_buffer_time` default from 3s to 5s {es-pull}112834[#112834] + +Indices APIs:: +* Ensure class resource stream is closed in `ResourceUtils` {es-pull}116437[#116437] + +Inference:: +* Add version prefix to Inference Service API path {es-pull}117095[#117095] + +Infra/Circuit Breakers:: +* Add link to Circuit Breaker "Data too large" exception message {es-pull}113561[#113561] + +Infra/Core:: +* Support for unsigned 64 bit numbers in Cpu stats {es-pull}114681[#114681] 
(issue: {es-issue}112274[#112274]) + +Infra/Metrics:: +* Add `ensureGreen` test method for use with `adminClient` {es-pull}113425[#113425] + +Infra/Scripting:: +* Add a `mustache.max_output_size_bytes` setting to limit the length of results from mustache scripts {es-pull}114002[#114002] + +Ingest Node:: +* Add postal_code support to the City and Enterprise databases {es-pull}114193[#114193] +* Add support for registered country fields for maxmind geoip databases {es-pull}114521[#114521] +* Adding support for additional mapping to simulate ingest API {es-pull}114742[#114742] +* Adding support for simulate ingest mapping addition for indices with mappings that do not come from templates {es-pull}115359[#115359] +* Support IPinfo database configurations {es-pull}114548[#114548] +* Support more maxmind fields in the geoip processor {es-pull}114268[#114268] + +Logs:: +* Add logsdb telemetry {es-pull}115994[#115994] +* Add num docs and size to logsdb telemetry {es-pull}116128[#116128] +* Feature: re-structure document ID generation favoring _id inverted index compression {es-pull}104683[#104683] + +Machine Learning:: +* Add DeBERTa-V2/V3 tokenizer {es-pull}111852[#111852] +* Add special case for elastic reranker in inference API {es-pull}116962[#116962] +* Adding inference endpoint validation for `AzureAiStudioService` {es-pull}113713[#113713] +* Adds support for `input_type` field to Vertex inference service {es-pull}116431[#116431] +* Enable built-in Inference Endpoints and default for Semantic Text {es-pull}116931[#116931] +* Increase default `queue_capacity` to 10_000 and decrease max `queue_capacity` to 100_000 {es-pull}115041[#115041] +* Inference duration and error metrics {es-pull}115876[#115876] +* Remove all mentions of eis and gateway and deprecate flags that do {es-pull}116692[#116692] +* [Inference API] Add API to get configuration of inference services {es-pull}114862[#114862] +* [Inference API] Improve chunked results error message {es-pull}115807[#115807] + +Network:: +* Allow http unsafe buffers by default {es-pull}116115[#116115] + +Recovery:: +* Attempt to clean up index before remote transfer {es-pull}115142[#115142] (issue: {es-issue}104473[#104473]) +* Trigger merges after recovery {es-pull}113102[#113102] + +Reindex:: +* Change Reindexing metrics unit from millis to seconds {es-pull}115721[#115721] + +Relevance:: +* Add query rules retriever {es-pull}114855[#114855] +* Add tracking for query rule types {es-pull}116357[#116357] + +Search:: +* Add Search Phase APM metrics {es-pull}113194[#113194] +* Add `docvalue_fields` Support for `dense_vector` Fields {es-pull}114484[#114484] (issue: {es-issue}108470[#108470]) +* Add initial support for `semantic_text` field type {es-pull}113920[#113920] +* Adds access to flags no_sub_matches and no_overlapping_matches to hyphenation-decompounder-tokenfilter {es-pull}115459[#115459] (issue: {es-issue}97849[#97849]) +* Better sizing `BytesRef` for Strings in Queries {es-pull}115655[#115655] +* Enable `_tier` based coordinator rewrites for all indices (not just mounted indices) {es-pull}115797[#115797] +* Only aggregations require at least one shard request {es-pull}115314[#115314] + +Security:: +* Add refresh `.security` index call between security migrations {es-pull}114879[#114879] + +Snapshot/Restore:: +* Improve message about insecure S3 settings {es-pull}116915[#116915] +* Retry `S3BlobContainer#getRegister` on all exceptions {es-pull}114813[#114813] +* Split searchable snapshot into multiple repo operations {es-pull}116918[#116918]
+* Track shard snapshot progress during node shutdown {es-pull}112567[#112567] + +Vector Search:: +* Add support for bitwise inner-product in painless {es-pull}116082[#116082] + +[[feature-9.0.0]] +[float] +=== New features + +Data streams:: +* Add default ILM policies and switch to ILM for apm-data plugin {es-pull}115687[#115687] + +ES|QL:: +* Add support for `BYTE_LENGTH` scalar function {es-pull}116591[#116591] +* Esql/lookup join grammar {es-pull}116515[#116515] +* Remove snapshot build restriction for match and qstr functions {es-pull}114482[#114482] + +Search:: +* ESQL - Add match operator (:) {es-pull}116819[#116819] [[upgrade-9.0.0]] [float] === Upgrades -// -// Infra/Core:: -// * Upgrade xcontent to Jackson 2.17.0 {es-pull}111948[#111948] -// * Upgrade xcontent to Jackson 2.17.2 {es-pull}112320[#112320] -// -// Infra/Metrics:: -// * Update APM Java Agent to support JDK 23 {es-pull}115194[#115194] (issues: {es-issue}115101[#115101], {es-issue}115100[#115100]) -// -// Search:: -// * Upgrade to Lucene 10 {es-pull}114741[#114741] -// * Upgrade to Lucene 9.12 {es-pull}113333[#113333] -// -// Snapshot/Restore:: -// * Upgrade Azure SDK {es-pull}111225[#111225] -// * Upgrade `repository-azure` dependencies {es-pull}112277[#112277] + +Search:: +* Upgrade to Lucene 10 {es-pull}114741[#114741] diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index edecd4f727583..b87081639c684 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -1,6 +1,3 @@ -// THIS IS A GENERATED FILE. DO NOT EDIT DIRECTLY. -// The content generated here are is not correct and most has been manually commented out until it can be fixed. -// See ES-9931 for more details. [[release-highlights]] == What's new in {minor-version} @@ -12,163 +9,14 @@ For detailed information about this release, see the <> and <>. endif::[] -// -// // tag::notable-highlights[] -// -// [discrete] -// [[esql_inlinestats]] -// === ESQL: INLINESTATS -// This adds the `INLINESTATS` command to ESQL which performs a STATS and -// then enriches the results into the output stream. So, this query: -// -// [source,esql] -// ---- -// FROM test -// | INLINESTATS m=MAX(a * b) BY b -// | WHERE m == a * b -// | SORT a DESC, b DESC -// | LIMIT 3 -// ---- -// -// Produces output like: -// -// | a | b | m | -// | --- | --- | ----- | -// | 99 | 999 | 98901 | -// | 99 | 998 | 98802 | -// | 99 | 997 | 98703 | -// -// {es-pull}109583[#109583] -// -// [discrete] -// [[always_allow_rebalancing_by_default]] -// === Always allow rebalancing by default -// In earlier versions of {es} the `cluster.routing.allocation.allow_rebalance` setting defaults to -// `indices_all_active` which blocks all rebalancing moves while the cluster is in `yellow` or `red` health. This was -// appropriate for the legacy allocator which might do too many rebalancing moves otherwise. Today's allocator has -// better support for rebalancing a cluster that is not in `green` health, and expects to be able to rebalance some -// shards away from over-full nodes to avoid allocating shards to undesirable locations in the first place. From -// version 8.16 `allow_rebalance` setting defaults to `always` unless the legacy allocator is explicitly enabled. 
-// -// {es-pull}111015[#111015] -// -// [discrete] -// [[add_global_retention_in_data_stream_lifecycle]] -// === Add global retention in data stream lifecycle -// Data stream lifecycle now supports configuring retention on a cluster level, -// namely global retention. Global retention \nallows us to configure two different -// retentions: -// -// - `data_streams.lifecycle.retention.default` is applied to all data streams managed -// by the data stream lifecycle that do not have retention defined on the data stream level. -// - `data_streams.lifecycle.retention.max` is applied to all data streams managed by the -// data stream lifecycle and it allows any data stream \ndata to be deleted after the `max_retention` has passed. -// -// {es-pull}111972[#111972] -// -// [discrete] -// [[enable_zstandard_compression_for_indices_with_index_codec_set_to_best_compression]] -// === Enable ZStandard compression for indices with index.codec set to best_compression -// Before DEFLATE compression was used to compress stored fields in indices with index.codec index setting set to -// best_compression, with this change ZStandard is used as compression algorithm to stored fields for indices with -// index.codec index setting set to best_compression. The usage ZStandard results in less storage usage with a -// similar indexing throughput depending on what options are used. Experiments with indexing logs have shown that -// ZStandard offers ~12% lower storage usage and a ~14% higher indexing throughput compared to DEFLATE. -// -// {es-pull}112665[#112665] -// -// [discrete] -// [[esql_introduce_per_agg_filter]] -// === ESQL: Introduce per agg filter -// Add support for aggregation scoped filters that work dynamically on the -// data in each group. -// -// [source,esql] -// ---- -// | STATS success = COUNT(*) WHERE 200 <= code AND code < 300, -// redirect = COUNT(*) WHERE 300 <= code AND code < 400, -// client_err = COUNT(*) WHERE 400 <= code AND code < 500, -// server_err = COUNT(*) WHERE 500 <= code AND code < 600, -// total_count = COUNT(*) -// ---- -// -// Implementation wise, the base AggregateFunction has been extended to -// allow a filter to be passed on. This is required to incorporate the -// filter as part of the aggregate equality/identity which would fail with -// the filter as an external component. -// As part of the process, the serialization for the existing aggregations -// had to be fixed so AggregateFunction implementations so that it -// delegates to their parent first. -// -// {es-pull}113735[#113735] -// -// // end::notable-highlights[] -// -// -// [discrete] -// [[esql_multi_value_fields_supported_in_geospatial_predicates]] -// === ESQL: Multi-value fields supported in Geospatial predicates -// Supporting multi-value fields in `WHERE` predicates is a challenge due to not knowing whether `ALL` or `ANY` -// of the values in the field should pass the predicate. -// For example, should the field `age:[10,30]` pass the predicate `WHERE age>20` or not? -// This ambiguity does not exist with the spatial predicates -// `ST_INTERSECTS` and `ST_DISJOINT`, because the choice between `ANY` or `ALL` -// is implied by the predicate itself. -// Consider a predicate checking a field named `location` against a test geometry named `shape`: -// -// * `ST_INTERSECTS(field, shape)` - true if `ANY` value can intersect the shape -// * `ST_DISJOINT(field, shape)` - true only if `ALL` values are disjoint from the shape -// -// This works even if the shape argument is itself a complex or compound geometry. 
-// -// Similar logic exists for `ST_CONTAINS` and `ST_WITHIN` predicates, but these are not as easily solved -// with `ANY` or `ALL`, because a collection of geometries contains another collection if each of the contained -// geometries is within at least one of the containing geometries. Evaluating this requires that the multi-value -// field is first combined into a single geometry before performing the predicate check. -// -// * `ST_CONTAINS(field, shape)` - true if the combined geometry contains the shape -// * `ST_WITHIN(field, shape)` - true if the combined geometry is within the shape -// -// {es-pull}112063[#112063] -// -// [discrete] -// [[enhance_sort_push_down_to_lucene_to_cover_references_to_fields_st_distance_function]] -// === Enhance SORT push-down to Lucene to cover references to fields and ST_DISTANCE function -// The most used and likely most valuable geospatial search query in Elasticsearch is the sorted proximity search, -// finding items within a certain distance of a point of interest and sorting the results by distance. -// This has been possible in ES|QL since 8.15.0, but the sorting was done in-memory, not pushed down to Lucene. -// Now the sorting is pushed down to Lucene, which results in a significant performance improvement. -// -// Queries that perform both filtering and sorting on distance are supported. For example: -// -// [source,esql] -// ---- -// FROM test -// | EVAL distance = ST_DISTANCE(location, TO_GEOPOINT("POINT(37.7749, -122.4194)")) -// | WHERE distance < 1000000 -// | SORT distance ASC, name DESC -// | LIMIT 10 -// ---- -// -// In addition, the support for sorting on EVAL expressions has been extended to cover references to fields: -// -// [source,esql] -// ---- -// FROM test -// | EVAL ref = field -// | SORT ref ASC -// | LIMIT 10 -// ---- -// -// {es-pull}112938[#112938] -// + +// The notable-highlights tag marks entries that +// should be featured in the Stack Installation and Upgrade Guide: +// tag::notable-highlights[] // [discrete] -// [[cross_cluster_search_telemetry]] -// === Cross-cluster search telemetry -// The cross-cluster search telemetry is collected when cross-cluster searches -// are performed, and is returned as "ccs" field in `_cluster/stats` output. -// It also add a new parameter `include_remotes=true` to the `_cluster/stats` API -// which will collect data from connected remote clusters. +// === Heading // -// {es-pull}113825[#113825] +// Description. +// end::notable-highlights[] + diff --git a/docs/reference/reranking/semantic-reranking.asciidoc b/docs/reference/reranking/semantic-reranking.asciidoc index 4ebe90e44708e..e1e2abd224a8e 100644 --- a/docs/reference/reranking/semantic-reranking.asciidoc +++ b/docs/reference/reranking/semantic-reranking.asciidoc @@ -85,14 +85,16 @@ In {es}, semantic re-rankers are implemented using the {es} <> using the `rerank` task type -** Integrate directly with the <> using the `rerank` task type -** Upload a model to {es} from Hugging Face with {eland-docs}/machine-learning.html#ml-nlp-pytorch[Eland]. You'll need to use the `text_similarity` NLP task type when loading the model using Eland. Refer to {ml-docs}/ml-nlp-model-ref.html#ml-nlp-model-ref-text-similarity[the Elastic NLP model reference] for a list of third party text similarity models supported by {es} for semantic re-ranking. -*** Then set up an <> with the `rerank` task type -. *Create a `rerank` task using the <>*. +. *Select and configure a re-ranking model*. +You have the following options: +.. 
Use the <> cross-encoder model via the inference API's {es} service. +.. Use the <> to create a `rerank` endpoint. +.. Use the <> to create a `rerank` endpoint. +.. Upload a model to {es} from Hugging Face with {eland-docs}/machine-learning.html#ml-nlp-pytorch[Eland]. You'll need to use the `text_similarity` NLP task type when loading the model using Eland. Then set up an <> with the `rerank` task type. ++ +Refer to {ml-docs}/ml-nlp-model-ref.html#ml-nlp-model-ref-text-similarity[the Elastic NLP model reference] for a list of third-party text similarity models supported by {es} for semantic re-ranking. + +. *Create a `rerank` endpoint using the <>*. The Inference API creates an inference endpoint and configures your chosen machine learning model to perform the re-ranking task. . *Define a `text_similarity_reranker` retriever in your search request*. The retriever syntax makes it simple to configure both the retrieval and re-ranking of search results in a single API call. @@ -117,7 +119,7 @@ POST _search } }, "field": "text", - "inference_id": "my-cohere-rerank-model", + "inference_id": "elastic-rerank", "inference_text": "How often does the moon hide the sun?", "rank_window_size": 100, "min_score": 0.5 diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc index 0da75ac30d2dd..b90b7e312c790 100644 --- a/docs/reference/search/retriever.asciidoc +++ b/docs/reference/search/retriever.asciidoc @@ -11,6 +11,7 @@ This allows for complex behavior to be depicted in a tree-like structure, called [TIP] ==== Refer to <> for a high level overview of the retrievers abstraction. +Refer to <> for additional examples. ==== The following retrievers are available: @@ -382,16 +383,17 @@ Refer to <> for a high level overview of semantic re-ranking ===== Prerequisites -To use `text_similarity_reranker` you must first set up a `rerank` task using the <>. -The `rerank` task should be set up with a machine learning model that can compute text similarity. +To use `text_similarity_reranker` you must first set up an inference endpoint for the `rerank` task using the <>. +The endpoint should be set up with a machine learning model that can compute text similarity. Refer to {ml-docs}/ml-nlp-model-ref.html#ml-nlp-model-ref-text-similarity[the Elastic NLP model reference] for a list of third-party text similarity models supported by {es}. -Currently you can: +You have the following options: + -* Integrate directly with the <> using the `rerank` task type -* Integrate directly with the <> using the `rerank` task type +* Use the built-in <> cross-encoder model via the inference API's {es} service. +* Use the <> with the `rerank` task type. +* Use the <> with the `rerank` task type. * Upload a model to {es} with {eland-docs}/machine-learning.html#ml-nlp-pytorch[Eland] using the `text_similarity` NLP task type. -** Then set up an <> with the `rerank` task type +** Then set up an <> with the `rerank` task type. ** Refer to the <> on this page for a step-by-step guide. ===== Parameters @@ -436,13 +438,70 @@ Note that score calculations vary depending on the model used. Applies the specified <> to the child <>. If the child retriever already specifies any filters, then this top-level filter is applied in conjunction with the filter defined in the child retriever.
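+
+For illustration, here is a minimal sketch of the `filter` parameter in action. It assumes a `rerank` inference endpoint named `my-rerank-model` has already been created, and that the index has `text` and `topic` fields; these names are placeholders for your own setup:
+
+[source,console]
+----
+POST _search
+{
+  "retriever": {
+    "text_similarity_reranker": {
+      "retriever": {
+        "standard": {
+          "query": {
+            "match": { "text": "moon eclipse" }
+          }
+        }
+      },
+      "filter": {
+        "term": { "topic": "astronomy" }
+      },
+      "field": "text",
+      "inference_id": "my-rerank-model",
+      "inference_text": "How often does the moon hide the sun?",
+      "rank_window_size": 100
+    }
+  }
+}
+----
+// TEST[skip:uses ML]
+
+Because the filter is applied to the child retriever, only documents matching the `term` filter are retrieved and then re-ranked by the inference endpoint.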
+[discrete] +[[text-similarity-reranker-retriever-example-elastic-rerank]] +==== Example: Elastic Rerank + +This example demonstrates how to deploy the Elastic Rerank model and use it to re-rank search results using the `text_similarity_reranker` retriever. + +Follow these steps: + +. Create an inference endpoint for the `rerank` task using the <>. ++ +[source,console] +---- +PUT _inference/rerank/my-elastic-rerank +{ + "service": "elasticsearch", + "service_settings": { + "model_id": ".rerank-v1", + "num_threads": 1, + "adaptive_allocations": { <1> + "enabled": true, + "min_number_of_allocations": 1, + "max_number_of_allocations": 10 + } + } +} +---- +// TEST[skip:uses ML] +<1> {ml-docs}/ml-nlp-auto-scale.html#nlp-model-adaptive-allocations[Adaptive allocations] will be enabled with a minimum of 1 and a maximum of 10 allocations. ++ +. Define a `text_similarity_reranker` retriever: ++ +[source,console] +---- +POST _search +{ + "retriever": { + "text_similarity_reranker": { + "retriever": { + "standard": { + "query": { + "match": { + "text": "How often does the moon hide the sun?" + } + } + } + }, + "field": "text", + "inference_id": "my-elastic-rerank", + "inference_text": "How often does the moon hide the sun?", + "rank_window_size": 100, + "min_score": 0.5 + } + } +} +---- +// TEST[skip:uses ML] + [discrete] [[text-similarity-reranker-retriever-example-cohere]] ==== Example: Cohere Rerank This example enables out-of-the-box semantic search by re-ranking top documents using the Cohere Rerank API. This approach eliminates the need to generate and store embeddings for all indexed documents. -This requires a <> using the `rerank` task type. +This requires a <> that is set up for the `rerank` task type. [source,console] ---- @@ -680,6 +739,12 @@ GET movies/_search <1> The `rule` retriever is the outermost retriever, applying rules to the search results that were previously reranked using the `rrf` retriever. <2> The `rrf` retriever returns results from all of its sub-retrievers, and the output of the `rrf` retriever is used as input to the `rule` retriever. +[discrete] +[[retriever-common-parameters]] +=== Common usage guidelines + +[discrete] +[[retriever-size-pagination]] ==== Using `from` and `size` with a retriever tree The <> and <> @@ -688,12 +753,16 @@ parameters are provided globally as part of the general They are applied to all retrievers in a retriever tree, unless a specific retriever overrides the `size` parameter using a different parameter such as `rank_window_size`. However, the final search hits are always limited to `size`. +[discrete] +[[retriever-aggregations]] ==== Using aggregations with a retriever tree <> are globally specified as part of a search request. The query used for an aggregation is the combination of all leaf retrievers as `should` clauses in a <>. +[discrete] +[[retriever-restrictions]] ==== Restrictions on search parameters when specifying a retriever When a retriever is specified as part of a search, the following elements are not allowed at the top level. Instead they are only allowed as elements of specific retrievers: * <> * <> * <> -* <> - diff --git a/docs/reference/search/rrf.asciidoc b/docs/reference/search/rrf.asciidoc index edd3b67e3de04..a942c0162a80a 100644 --- a/docs/reference/search/rrf.asciidoc +++ b/docs/reference/search/rrf.asciidoc @@ -105,7 +105,7 @@ The `rrf` retriever does not currently support: * <> Using unsupported features as part of a search with an `rrf` retriever results in an exception.
-+ + IMPORTANT: It is best to avoid providing a <> as part of the request, as RRF creates one internally that is shared by all sub-retrievers to ensure consistent results. @@ -703,3 +703,99 @@ So for the same params as above, we would now have: * `from=0, size=2` would return [`1`, `5`] with ranks `[1, 2]` * `from=2, size=2` would return an empty result set as it would fall outside the available `rank_window_size` results. + +==== Aggregations in RRF + +The `rrf` retriever supports aggregations from all specified sub-retrievers. Important notes about aggregations: + +* They operate on the complete result set from all sub-retrievers +* They are not limited by the `rank_window_size` parameter +* They process the union of all matching documents + +For example, consider the following document set: +[source,js] +---- +[ + { "_id": 1, "termA": "foo" }, + { "_id": 2, "termA": "foo", "termB": "bar" }, + { "_id": 3, "termA": "aardvark", "termB": "bar" }, + { "_id": 4, "termA": "foo", "termB": "bar" } +] +---- +// NOTCONSOLE + +Perform a term aggregation on the `termA` field using an `rrf` retriever: +[source,js] +---- +{ + "retriever": { + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "term": { + "termB": "bar" + } + } + } + }, + { + "standard": { + "query": { + "match_all": { } + } + } + } + ], + "rank_window_size": 1 + } + }, + "size": 1, + "aggs": { + "termA_agg": { + "terms": { + "field": "termA" + } + } + } +} +---- +// NOTCONSOLE + +The aggregation results will include *all* matching documents, regardless of `rank_window_size`. +[source,js] +---- +{ + "foo": 3, + "aardvark": 1 +} +---- +// NOTCONSOLE + +==== Highlighting in RRF + +Using the `rrf` retriever, you can add <> to show relevant text snippets in your search results. Highlighted snippets are computed based +on the matching text queries defined on the sub-retrievers. + +IMPORTANT: Highlighting on vector fields, using either the `knn` retriever or a `knn` query, is not supported. + +A more specific example of highlighting in RRF can also be found on the <> page. + +==== Inner hits in RRF + +The `rrf` retriever supports <> functionality, allowing you to retrieve +related nested or parent/child documents alongside your main search results. Inner hits can be +specified as part of any nested sub-retriever and will be propagated to the top-level parent +retriever. Note that the inner hit computation will take place only at the end of the `rrf` retriever's +evaluation on the top matching documents, and not as part of the query execution of the nested +sub-retrievers. + +[IMPORTANT] +==== +When defining multiple `inner_hits` sections across sub-retrievers: + +* Each `inner_hits` section must have a unique name +* Names must be unique across all sub-retrievers in the search request +==== diff --git a/docs/reference/search/search-your-data/highlighting.asciidoc b/docs/reference/search/search-your-data/highlighting.asciidoc index 7ee13d971b035..6a432e6104524 100644 --- a/docs/reference/search/search-your-data/highlighting.asciidoc +++ b/docs/reference/search/search-your-data/highlighting.asciidoc @@ -176,8 +176,6 @@ fragmenter:: Specifies how text should be broken up in highlight snippets: `simple` or `span`. Only valid for the `plain` highlighter. Defaults to `span`. -force_source:: deprecated; this parameter has no effect - `simple`::: Breaks up text into same-sized fragments. `span`::: Breaks up text into same-sized fragments, but tries to avoid breaking up text between highlighted terms.
This is helpful when you're diff --git a/docs/reference/search/search-your-data/retrievers-examples.asciidoc b/docs/reference/search/search-your-data/retrievers-examples.asciidoc index 8cd1a4bf5ce98..ad1cc32dcee01 100644 --- a/docs/reference/search/search-your-data/retrievers-examples.asciidoc +++ b/docs/reference/search/search-your-data/retrievers-examples.asciidoc @@ -1,31 +1,16 @@ [[retrievers-examples]] -=== Retrievers examples Learn how to combine different retrievers in these hands-on examples. -To demonstrate the full functionality of retrievers, these examples require access to a <> set up using the <>. + +=== Retrievers examples [discrete] [[retrievers-examples-setup]] ==== Add example data -To begin with, we'll set up the necessary services and have them in place for later use. - -[source,js] ----- -// Setup rerank task stored as `my-rerank-model` -PUT _inference/rerank/my-rerank-model -{ - "service": "cohere", - "service_settings": { - "model_id": "rerank-english-v3.0", - "api_key": "{{COHERE_API_KEY}}" - } -} ----- -//NOTCONSOLE +To begin with, let's create the `retrievers_example` index and add some documents to it. -Now that we have our reranking service in place, lets create the `retrievers_example` index, and add some documents to it. -[source,js] +[source,console] ---- PUT retrievers_example { @@ -49,11 +34,7 @@ PUT retrievers_example } } } ----- -//NOTCONSOLE -[source,js] ----- POST /retrievers_example/_doc/1 { "vector": [0.23, 0.67, 0.89], @@ -94,10 +75,12 @@ POST /retrievers_example/_doc/5 "topic": ["documentation", "observability", "elastic"] } +POST /retrievers_example/_refresh + ---- -//NOTCONSOLE +// TESTSETUP -Now that we also have our documents in place, let's try to run some queries using retrievers. +Now that we have our documents in place, let's try to run some queries using retrievers. [discrete] [[retrievers-examples-combining-standard-knn-retrievers-with-rrf]] ==== Example: Combining `standard` and `knn` retrievers with `rrf` First, let's examine how to combine two different types of queries: a `knn` query and a `query_string` query. While these queries may produce scores in different ranges, we can use Reciprocal Rank Fusion (`rrf`) to combine the results and generate a merged final result list. To implement this in the retriever framework, we start with the top-level element: our `rrf` retriever. This retriever operates on top of two other retrievers: a `knn` retriever and a `standard` retriever. Our query structure would look like this: -[source,js] +[source,console] ---- GET /retrievers_example/_search { - "retriever":{ - "rrf": { - "retrievers":[ - { - "standard":{ - "query":{ - "query_string":{ - "query": "(information retrieval) OR (artificial intelligence)", - "default_field": "text" - } - } - } - }, - { - "knn": { - "field": "vector", - "query_vector": [ - 0.23, - 0.67, - 0.89 - ], - "k": 3, - "num_candidates": 5 - } - } - ], - "rank_window_size": 10, - "rank_constant": 1 - } - }, - "_source": ["text", "topic"] + "retriever": { + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "query_string": { + "query": "(information retrieval) OR (artificial intelligence)", + "default_field": "text" + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + "_source": false +} +---- +// TEST
+ +.Example response +[%collapsible] +============== +[source,console-result] +---- +{ + "took": 42, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 3, + "relation": "eq" + }, + "max_score": 0.8333334, + "hits": [ + { + "_index": "retrievers_example", + "_id": "1", + "_score": 0.8333334 + }, + { + "_index": "retrievers_example", + "_id": "2", + "_score": 0.8333334 + }, + { + "_index": "retrievers_example", + "_id": "3", + "_score": 0.25 + } + ] + } } ---- -//NOTCONSOLE +// TESTRESPONSE[s/"took": 42/"took": $body.took/] +============== [discrete] [[retrievers-examples-collapsing-retriever-results]] ==== Example: Grouping results by year with `collapse` In our result set, we have many documents with the same `year` value. We can clean this -up using the `collapse` parameter with our retriever. This enables grouping results by -any field and returns only the highest-scoring document from each group. In this example +up using the `collapse` parameter with our retriever. This, as with the standard <> feature, +enables grouping results by any field and returns only the highest-scoring document from each group. In this example we'll collapse our results based on the `year` field. -[source,js] +[source,console] ---- GET /retrievers_example/_search { - "retriever":{ - "rrf": { - "retrievers":[ - { - "standard":{ - "query":{ - "query_string":{ - "query": "(information retrieval) OR (artificial intelligence)", - "default_field": "text" - } - } - } - }, - { - "knn": { - "field": "vector", - "query_vector": [ - 0.23, - 0.67, - 0.89 - ], - "k": 3, - "num_candidates": 5 - } - } - ], - "rank_window_size": 10, - "rank_constant": 1 - } - }, - "collapse": { - "field": "year", - "inner_hits": { - "name": "topic related documents", - "_source": ["text", "year"] - } - }, - "_source": ["text", "topic"] + "retriever": { + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "query_string": { + "query": "(information retrieval) OR (artificial intelligence)", + "default_field": "text" + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + "collapse": { + "field": "year", + "inner_hits": { + "name": "topic related documents", + "_source": [ + "year" + ] + } + }, + "_source": false } ---- -//NOTCONSOLE +// TEST[continued] -[discrete] -[[retrievers-examples-text-similarity-reranker-on-top-of-rrf]] -==== Example: Rerank results of an RRF retriever +This returns the following response with collapsed results. -Previously, we used a `text_similarity_reranker` retriever within an `rrf` retriever. -Because retrievers support full composability, we can also rerank the results of an -`rrf` retriever. Let's apply this to our first example. 
- -[source,js] +.Example response +[%collapsible] +============== +[source,console-result] ---- -GET retrievers_example/_search { - "retriever": { - "text_similarity_reranker": { - "retriever": { - "rrf": { - "retrievers": [ - { - "standard":{ - "query":{ - "query_string":{ - "query": "(information retrieval) OR (artificial intelligence)", - "default_field": "text" - } - } - } - }, - { - "knn": { - "field": "vector", - "query_vector": [ - 0.23, - 0.67, - 0.89 - ], - "k": 3, - "num_candidates": 5 - } - } - ], - "rank_window_size": 10, - "rank_constant": 1 - } - }, - "field": "text", - "inference_id": "my-rerank-model", - "inference_text": "What are the state of the art applications of AI in information retrieval?" - } - }, - "_source": ["text", "topic"] + "took": 42, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 3, + "relation": "eq" + }, + "max_score": 0.8333334, + "hits": [ + { + "_index": "retrievers_example", + "_id": "1", + "_score": 0.8333334, + "fields": { + "year": [ + 2024 + ] + }, + "inner_hits": { + "topic related documents": { + "hits": { + "total": { + "value": 2, + "relation": "eq" + }, + "max_score": 0.8333334, + "hits": [ + { + "_index": "retrievers_example", + "_id": "1", + "_score": 0.8333334, + "_source": { + "year": 2024 + } + }, + { + "_index": "retrievers_example", + "_id": "3", + "_score": 0.25, + "_source": { + "year": 2024 + } + } + ] + } + } + } + }, + { + "_index": "retrievers_example", + "_id": "2", + "_score": 0.8333334, + "fields": { + "year": [ + 2023 + ] + }, + "inner_hits": { + "topic related documents": { + "hits": { + "total": { + "value": 1, + "relation": "eq" + }, + "max_score": 0.8333334, + "hits": [ + { + "_index": "retrievers_example", + "_id": "2", + "_score": 0.8333334, + "_source": { + "year": 2023 + } + } + ] + } + } + } + } + ] + } } - ---- -//NOTCONSOLE +// TESTRESPONSE[s/"took": 42/"took": $body.took/] +============== [discrete] -[[retrievers-examples-rrf-ranking-on-text-similarity-reranker-results]] -==== Example: RRF with semantic reranker +[[retrievers-examples-highlighting-retriever-results]] +==== Example: Highlighting results based on nested sub-retrievers -For this example, we'll replace our semantic query with the `my-rerank-model` -reranker we previously configured. Since this is a reranker, it needs an initial pool of -documents to work with. In this case, we'll filter for documents about `ai` topics. +Highlighting is now also available for nested sub-retrievers matches. For example, consider the same +`rrf` retriever as above, with a `knn` and `standard` retriever as its sub-retrievers. We can specify a `highlight` +section, as defined in <> documentation, and compute highlights for the top results. -[source,js] +[source,console] ---- GET /retrievers_example/_search { "retriever": { "rrf": { "retrievers": [ + { + "standard": { + "query": { + "query_string": { + "query": "(information retrieval) OR (artificial intelligence)", + "default_field": "text" + } + } + } + }, { "knn": { "field": "vector", @@ -287,21 +372,221 @@ GET /retrievers_example/_search "k": 3, "num_candidates": 5 } - }, + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + "highlight": { + "fields": { + "text": { + "fragment_size": 150, + "number_of_fragments": 3 + } + } + }, + "_source": false +} +---- +// TEST[continued] + +This would highlight the `text` field, based on the matches produced by the `standard` retriever. 
The highlighted snippets +would then be included in the response as usual, i.e. under each search hit. + +.Example response +[%collapsible] +============== +[source,console-result] +---- +{ + "took": 42, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 3, + "relation": "eq" + }, + "max_score": 0.8333334, + "hits": [ + { + "_index": "retrievers_example", + "_id": "1", + "_score": 0.8333334, + "highlight": { + "text": [ + "Large language models are revolutionizing information retrieval by boosting search precision, deepening contextual understanding, and reshaping user experiences" + ] + } + }, + { + "_index": "retrievers_example", + "_id": "2", + "_score": 0.8333334, + "highlight": { + "text": [ + "Artificial intelligence is transforming medicine, from advancing diagnostics and tailoring treatment plans to empowering predictive patient care for improved" + ] + } + }, + { + "_index": "retrievers_example", + "_id": "3", + "_score": 0.25 + } + ] + } +} +---- +// TESTRESPONSE[s/"took": 42/"took": $body.took/] +============== + +[discrete] +[[retrievers-examples-inner-hits-retriever-results]] +==== Example: Computing inner hits from nested sub-retrievers + +We can also define `inner_hits` to be computed on any of the sub-retrievers, and propagate those computations to the top +level compound retriever. For example, let's create a new index with a `knn` field, nested under the `nested_field` field, +and index a couple of documents. + +[source,console] +---- +PUT retrievers_example_nested +{ + "mappings": { + "properties": { + "nested_field": { + "type": "nested", + "properties": { + "paragraph_id": { + "type": "keyword" + }, + "nested_vector": { + "type": "dense_vector", + "dims": 3, + "similarity": "l2_norm", + "index": true + } + } + }, + "topic": { + "type": "keyword" + } + } + } +} + +POST /retrievers_example_nested/_doc/1 +{ + "nested_field": [ + { + "paragraph_id": "1a", + "nested_vector": [ + -1.12, + -0.59, + 0.78 + ] + }, + { + "paragraph_id": "1b", + "nested_vector": [ + -0.12, + 1.56, + 0.42 + ] + }, + { + "paragraph_id": "1c", + "nested_vector": [ + 1, + -1, + 0 + ] + } + ], + "topic": [ + "ai" + ] +} + +POST /retrievers_example_nested/_doc/2 +{ + "nested_field": [ + { + "paragraph_id": "2a", + "nested_vector": [ + 0.23, + 1.24, + 0.65 + ] + } + ], + "topic": [ + "information_retrieval" + ] +} + +POST /retrievers_example_nested/_doc/3 +{ + "topic": [ + "ai" + ] +} + +POST /retrievers_example_nested/_refresh +---- +// TEST[continued] + +Now we can run an `rrf` retriever query and also compute <> for the `nested_field.nested_vector` +field, based on the `knn` query specified. + +[source,console] +---- +GET /retrievers_example_nested/_search +{ + "retriever": { + "rrf": { + "retrievers": [ { - "text_similarity_reranker": { - "retriever": { - "standard": { + "standard": { + "query": { + "nested": { + "path": "nested_field", + "inner_hits": { + "name": "nested_vector", + "_source": false, + "fields": [ + "nested_field.paragraph_id" + ] + }, "query": { - "term": { - "topic": "ai" + "knn": { + "field": "nested_field.nested_vector", + "query_vector": [ + 1, + 0, + 0.5 + ], + "k": 10 } } } - }, - "field": "text", - "inference_id": "my-rerank-model", - "inference_text": "Can I use generative AI to identify user intent and improve search relevance?" 
+ } + } + }, + { + "standard": { + "query": { + "term": { + "topic": "ai" + } + } } } ], @@ -310,64 +595,184 @@ GET /retrievers_example/_search } }, "_source": [ - "text", "topic" ] } ---- -//NOTCONSOLE - -[discrete] -[[retrievers-examples-chaining-text-similarity-reranker-retrievers]] -==== Example: Chaining multiple semantic rerankers +// TEST[continued] -Full composability means we can chain together multiple retrievers of the same type. For instance, imagine we have a computationally expensive reranker that's specialized for AI content. We can rerank the results of a `text_similarity_reranker` using another `text_similarity_reranker` retriever. Each reranker can operate on different fields and/or use different inference services. +This would propagate the `inner_hits` defined for the `knn` query to the `rrf` retriever, and compute inner hits for `rrf`'s top results. -[source,js] +.Example response +[%collapsible] +============== +[source,console-result] ---- -GET retrievers_example/_search { - "retriever": { - "text_similarity_reranker": { - "retriever": { - "text_similarity_reranker": { - "retriever": { - "knn": { - "field": "vector", - "query_vector": [ - 0.23, - 0.67, - 0.89 - ], - "k": 3, - "num_candidates": 5 - } - }, - "rank_window_size": 100, - "field": "text", - "inference_id": "my-rerank-model", - "inference_text": "What are the state of the art applications of AI in information retrieval?" - } - }, - "rank_window_size": 10, - "field": "text", - "inference_id": "my-other-more-expensive-rerank-model", - "inference_text": "Applications of Large Language Models in technology and their impact on user satisfaction" - } - }, - "_source": [ - "text", - "topic" - ] + "took": 42, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 3, + "relation": "eq" + }, + "max_score": 1.0, + "hits": [ + { + "_index": "retrievers_example_nested", + "_id": "1", + "_score": 1.0, + "_source": { + "topic": [ + "ai" + ] + }, + "inner_hits": { + "nested_vector": { + "hits": { + "total": { + "value": 3, + "relation": "eq" + }, + "max_score": 0.44353113, + "hits": [ + { + "_index": "retrievers_example_nested", + "_id": "1", + "_nested": { + "field": "nested_field", + "offset": 2 + }, + "_score": 0.44353113, + "fields": { + "nested_field": [ + { + "paragraph_id": [ + "1c" + ] + } + ] + } + }, + { + "_index": "retrievers_example_nested", + "_id": "1", + "_nested": { + "field": "nested_field", + "offset": 1 + }, + "_score": 0.26567122, + "fields": { + "nested_field": [ + { + "paragraph_id": [ + "1b" + ] + } + ] + } + }, + { + "_index": "retrievers_example_nested", + "_id": "1", + "_nested": { + "field": "nested_field", + "offset": 0 + }, + "_score": 0.18478848, + "fields": { + "nested_field": [ + { + "paragraph_id": [ + "1a" + ] + } + ] + } + } + ] + } + } + } + }, + { + "_index": "retrievers_example_nested", + "_id": "2", + "_score": 0.33333334, + "_source": { + "topic": [ + "information_retrieval" + ] + }, + "inner_hits": { + "nested_vector": { + "hits": { + "total": { + "value": 1, + "relation": "eq" + }, + "max_score": 0.32002488, + "hits": [ + { + "_index": "retrievers_example_nested", + "_id": "2", + "_nested": { + "field": "nested_field", + "offset": 0 + }, + "_score": 0.32002488, + "fields": { + "nested_field": [ + { + "paragraph_id": [ + "2a" + ] + } + ] + } + } + ] + } + } + } + }, + { + "_index": "retrievers_example_nested", + "_id": "3", + "_score": 0.33333334, + "_source": { + "topic": [ + "ai" + ] + }, + 
"inner_hits": { + "nested_vector": { + "hits": { + "total": { + "value": 0, + "relation": "eq" + }, + "max_score": null, + "hits": [] + } + } + } + } + ] + } } ---- -//NOTCONSOLE +// TESTRESPONSE[s/"took": 42/"took": $body.took/] +============== - -Note that our example applies two reranking steps. First, we rerank the top 100 -documents from the `knn` search using the `my-rerank-model` reranker. Then we -pick the top 10 results and rerank them using the more fine-grained -`my-other-more-expensive-rerank-model`. +Note: if using more than one `inner_hits` we need to provide custom names for each `inner_hits` so that they +are unique across all retrievers within the request. [discrete] [[retrievers-examples-rrf-and-aggregations]] @@ -380,7 +785,7 @@ the `terms` aggregation for the `topic` field will include all results, not just from the 2 nested retrievers, i.e. all documents whose `year` field is greater than 2023, and whose `topic` field matches the term `elastic`. -[source,js] +[source,console] ---- GET retrievers_example/_search { @@ -412,10 +817,7 @@ GET retrievers_example/_search "rank_constant": 1 } }, - "_source": [ - "text", - "topic" - ], + "_source": false, "aggs": { "topics": { "terms": { @@ -425,4 +827,436 @@ GET retrievers_example/_search } } ---- -//NOTCONSOLE +// TEST[continued] + +.Example response +[%collapsible] +============== +[source, console-result] +---- +{ + "took": 42, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 4, + "relation": "eq" + }, + "max_score": 0.5833334, + "hits": [ + { + "_index": "retrievers_example", + "_id": "5", + "_score": 0.5833334 + }, + { + "_index": "retrievers_example", + "_id": "1", + "_score": 0.5 + }, + { + "_index": "retrievers_example", + "_id": "4", + "_score": 0.5 + }, + { + "_index": "retrievers_example", + "_id": "3", + "_score": 0.33333334 + } + ] + }, + "aggregations": { + "topics": { + "doc_count_error_upper_bound": 0, + "sum_other_doc_count": 0, + "buckets": [ + { + "key": "ai", + "doc_count": 3 + }, + { + "key": "elastic", + "doc_count": 2 + }, + { + "key": "assistant", + "doc_count": 1 + }, + { + "key": "documentation", + "doc_count": 1 + }, + { + "key": "information_retrieval", + "doc_count": 1 + }, + { + "key": "llm", + "doc_count": 1 + }, + { + "key": "observability", + "doc_count": 1 + }, + { + "key": "security", + "doc_count": 1 + } + ] + } + } +} +---- +// TESTRESPONSE[s/"took": 42/"took": $body.took/] +============== + +[discrete] +[[retrievers-examples-explain-multiple-rrf]] +==== Example: Explainability with multiple retrievers + +By adding `explain: true` to the request, each retriever will now provide a detailed explanation of all the steps +and calculations required to compute the final score. Composability is fully supported in the context of `explain`, and +each retriever will provide its own explanation, as shown in the example below. 
+ +[source,console] +---- +GET /retrievers_example/_search +{ + "retriever": { + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "term": { + "topic": "elastic" + } + } + } + }, + { + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "query_string": { + "query": "(information retrieval) OR (artificial intelligence)", + "default_field": "text" + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + "_source": false, + "size": 1, + "explain": true +} +---- +// TEST[continued] + +The output of which, albeit a bit verbose, will provide all the necessary info to assist in debugging and reason with ranking. + +.Example response +[%collapsible] +============== +[source, console-result] +---- +{ + "took": 42, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 5, + "relation": "eq" + }, + "max_score": 0.5, + "hits": [ + { + "_shard": "[retrievers_example][0]", + "_node": "jnrdZFKS3abUgWVsVdj2Vg", + "_index": "retrievers_example", + "_id": "1", + "_score": 0.5, + "_explanation": { + "value": 0.5, + "description": "rrf score: [0.5] computed for initial ranks [0, 1] with rankConstant: [1] as sum of [1 / (rank + rankConstant)] for each query", + "details": [ + { + "value": 0.0, + "description": "rrf score: [0], result not found in query at index [0]", + "details": [] + }, + { + "value": 1, + "description": "rrf score: [0.5], for rank [1] in query at index [1] computed as [1 / (1 + 1)], for matching query with score", + "details": [ + { + "value": 0.8333334, + "description": "rrf score: [0.8333334] computed for initial ranks [2, 1] with rankConstant: [1] as sum of [1 / (rank + rankConstant)] for each query", + "details": [ + { + "value": 2, + "description": "rrf score: [0.33333334], for rank [2] in query at index [0] computed as [1 / (2 + 1)], for matching query with score", + "details": [ + { + "value": 2.8129659, + "description": "sum of:", + "details": [ + { + "value": 1.4064829, + "description": "weight(text:information in 0) [PerFieldSimilarity], result of:", + "details": [ + *** + ] + }, + { + "value": 1.4064829, + "description": "weight(text:retrieval in 0) [PerFieldSimilarity], result of:", + "details": [ + *** + ] + } + ] + } + ] + }, + { + "value": 1, + "description": "rrf score: [0.5], for rank [1] in query at index [1] computed as [1 / (1 + 1)], for matching query with score", + "details": [ + { + "value": 1, + "description": "doc [0] with an original score of [1.0] is at rank [1] from the following source queries.", + "details": [ + { + "value": 1.0, + "description": "found vector with calculated similarity: 1.0", + "details": [] + } + ] + } + ] + } + ] + } + ] + } + ] + } + } + ] + } +} +---- +// TESTRESPONSE[s/"took": 42/"took": $body.took/] +// TESTRESPONSE[s/\.\.\./$body.hits.hits.0._explanation.details.1.details.0.details.0.details.0.details.0.details.0/] +// TESTRESPONSE[s/\*\*\*/$body.hits.hits.0._explanation.details.1.details.0.details.0.details.0.details.1.details.0/] +// TESTRESPONSE[s/jnrdZFKS3abUgWVsVdj2Vg/$body.hits.hits.0._node/] +============== + +[discrete] +[[retrievers-examples-text-similarity-reranker-on-top-of-rrf]] +==== Example: Rerank results of an RRF retriever + +To demonstrate the full functionality of retrievers, the following examples also 
require access to a <> set up using the <>. + +In this example we'll set up a reranking service and use it with the `text_similarity_reranker` retriever to rerank our top results. + +[source,console] +---- +PUT _inference/rerank/my-rerank-model +{ + "service": "cohere", + "service_settings": { + "model_id": "rerank-english-v3.0", + "api_key": "{{COHERE_API_KEY}}" + } +} +---- +// TEST[skip: no_access_to_ml] + +Let's start by reranking the results of the `rrf` retriever in our previous example. + +[source,console] +---- +GET retrievers_example/_search +{ + "retriever": { + "text_similarity_reranker": { + "retriever": { + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "query_string": { + "query": "(information retrieval) OR (artificial intelligence)", + "default_field": "text" + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + "field": "text", + "inference_id": "my-rerank-model", + "inference_text": "What are the state of the art applications of AI in information retrieval?" + } + }, + "_source": false +} + +---- +// TEST[skip: no_access_to_ml] + +[discrete] +[[retrievers-examples-rrf-ranking-on-text-similarity-reranker-results]] +==== Example: RRF with semantic reranker + +For this example, we'll replace the rrf's `standard` retriever with the `text_similarity_reranker` retriever, using the +`my-rerank-model` reranker we previously configured. Since this is a reranker, it needs an initial pool of +documents to work with. In this case, we'll rerank the top `rank_window_size` documents matching the `ai` topic. + +[source,console] +---- +GET /retrievers_example/_search +{ + "retriever": { + "rrf": { + "retrievers": [ + { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + }, + { + "text_similarity_reranker": { + "retriever": { + "standard": { + "query": { + "term": { + "topic": "ai" + } + } + } + }, + "field": "text", + "inference_id": "my-rerank-model", + "inference_text": "Can I use generative AI to identify user intent and improve search relevance?" + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + "_source": false +} +---- +// TEST[skip: no_access_to_ml] + +[discrete] +[[retrievers-examples-chaining-text-similarity-reranker-retrievers]] +==== Example: Chaining multiple semantic rerankers + +Full composability means we can chain together multiple retrievers of the same type. For instance, +imagine we have a computationally expensive reranker that's specialized for AI content. We can rerank the results of a `text_similarity_reranker` using another `text_similarity_reranker` retriever. Each reranker can operate on different fields and/or use different inference services. + +[source,console] +---- +GET retrievers_example/_search +{ + "retriever": { + "text_similarity_reranker": { + "retriever": { + "text_similarity_reranker": { + "retriever": { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + }, + "rank_window_size": 100, + "field": "text", + "inference_id": "my-rerank-model", + "inference_text": "What are the state of the art applications of AI in information retrieval?" 
+ } + }, + "rank_window_size": 10, + "field": "text", + "inference_id": "my-other-more-expensive-rerank-model", + "inference_text": "Applications of Large Language Models in technology and their impact on user satisfaction" + } + }, + "_source": false +} +---- +// TEST[skip: no_access_to_ml] + +Note that our example applies two reranking steps. First, we rerank the top 100 +documents from the `knn` search using the `my-rerank-model` reranker. Then we +pick the top 10 results and rerank them using the more fine-grained +`my-other-more-expensive-rerank-model`. diff --git a/docs/reference/search/search-your-data/search-your-data.asciidoc b/docs/reference/search/search-your-data/search-your-data.asciidoc index 82541412db4bd..9ef1ae0ebc59b 100644 --- a/docs/reference/search/search-your-data/search-your-data.asciidoc +++ b/docs/reference/search/search-your-data/search-your-data.asciidoc @@ -43,10 +43,12 @@ DSL, with a simplified user experience. Create search applications based on your results directly in the Kibana Search UI. include::search-api.asciidoc[] +include::../../how-to/recipes.asciidoc[] +// ☝️ search relevance recipes include::retrievers-overview.asciidoc[] include::knn-search.asciidoc[] include::semantic-search.asciidoc[] include::search-across-clusters.asciidoc[] include::search-with-synonyms.asciidoc[] include::search-application-overview.asciidoc[] -include::behavioral-analytics/behavioral-analytics-overview.asciidoc[] +include::behavioral-analytics/behavioral-analytics-overview.asciidoc[] \ No newline at end of file diff --git a/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc b/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc index 60692c19c184a..ba9c81db21384 100644 --- a/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc @@ -21,45 +21,9 @@ This tutorial uses the <> for demonstra [[semantic-text-requirements]] ==== Requirements -To use the `semantic_text` field type, you must have an {infer} endpoint deployed in -your cluster using the <>. +This tutorial uses the <> for demonstration, which is created automatically as needed. +To use the `semantic_text` field type with an {infer} service other than ELSER, you must create an inference endpoint using the <>. -[discrete] -[[semantic-text-infer-endpoint]] -==== Create the {infer} endpoint - -Create an inference endpoint by using the <>: - -[source,console] ------------------------------------------------------------- -PUT _inference/sparse_embedding/my-elser-endpoint <1> -{ - "service": "elser", <2> - "service_settings": { - "adaptive_allocations": { <3> - "enabled": true, - "min_number_of_allocations": 3, - "max_number_of_allocations": 10 - }, - "num_threads": 1 - } -} ------------------------------------------------------------- -// TEST[skip:TBD] -<1> The task type is `sparse_embedding` in the path as the `elser` service will -be used and ELSER creates sparse vectors. The `inference_id` is -`my-elser-endpoint`. -<2> The `elser` service is used in this example. -<3> This setting enables and configures {ml-docs}/ml-nlp-auto-scale.html#nlp-model-adaptive-allocations[adaptive allocations]. -Adaptive allocations make it possible for ELSER to automatically scale up or down resources based on the current load on the process. - -[NOTE] -==== -You might see a 502 bad gateway error in the response when using the {kib} Console. 
-This error usually just reflects a timeout, while the model downloads in the background. -You can check the download progress in the {ml-app} UI. -If using the Python client, you can set the `timeout` parameter to a higher value. -==== [discrete] [[semantic-text-index-mapping]] @@ -75,8 +39,7 @@ PUT semantic-embeddings "mappings": { "properties": { "content": { <1> - "type": "semantic_text", <2> - "inference_id": "my-elser-endpoint" <3> + "type": "semantic_text" <2> } } } @@ -85,18 +48,14 @@ PUT semantic-embeddings // TEST[skip:TBD] <1> The name of the field to contain the generated embeddings. <2> The field to contain the embeddings is a `semantic_text` field. -<3> The `inference_id` is the inference endpoint you created in the previous step. -It will be used to generate the embeddings based on the input text. -Every time you ingest data into the related `semantic_text` field, this endpoint will be used for creating the vector representation of the text. +Since no `inference_id` is provided, the <> is used by default. +To use a different {infer} service, you must create an {infer} endpoint first using the <> and then specify it in the `semantic_text` field mapping using the `inference_id` parameter. [NOTE] ==== -If you're using web crawlers or connectors to generate indices, you have to -<> for these indices to -include the `semantic_text` field. Once the mapping is updated, you'll need to run -a full web crawl or a full connector sync. This ensures that all existing -documents are reprocessed and updated with the new semantic embeddings, -enabling semantic search on the updated data. +If you're using web crawlers or connectors to generate indices, you have to <> for these indices to include the `semantic_text` field. +Once the mapping is updated, you'll need to run a full web crawl or a full connector sync. +This ensures that all existing documents are reprocessed and updated with the new semantic embeddings, enabling semantic search on the updated data. ==== @@ -288,4 +247,4 @@ query from the `semantic-embedding` index: * If you want to use `semantic_text` in hybrid search, refer to https://colab.research.google.com/github/elastic/elasticsearch-labs/blob/main/notebooks/search/09-semantic-text.ipynb[this notebook] for a step-by-step guide. * For more information on how to optimize your ELSER endpoints, refer to {ml-docs}/ml-nlp-elser.html#elser-recommendations[the ELSER recommendations] section in the model documentation. -* To learn more about model autoscaling, refer to the {ml-docs}/ml-nlp-auto-scale.html[trained model autoscaling] page. \ No newline at end of file +* To learn more about model autoscaling, refer to the {ml-docs}/ml-nlp-auto-scale.html[trained model autoscaling] page. diff --git a/docs/reference/searchable-snapshots/index.asciidoc b/docs/reference/searchable-snapshots/index.asciidoc index a38971a0bae6a..8b0b3dc57686e 100644 --- a/docs/reference/searchable-snapshots/index.asciidoc +++ b/docs/reference/searchable-snapshots/index.asciidoc @@ -176,10 +176,10 @@ nodes that have a shared cache. ==== Manually mounting snapshots captured by an Index Lifecycle Management ({ilm-init}) policy can interfere with {ilm-init}'s automatic management. This may lead to issues such as data loss -or complications with snapshot handling. +or complications with snapshot handling. For optimal results, allow {ilm-init} to manage -snapshots automatically. +snapshots automatically. <>. ==== @@ -293,6 +293,14 @@ repository. 
If you wish to search data across multiple regions, configure multiple clusters and use <> or <> instead of {search-snaps}. +It's worth noting that if a searchable snapshot index has no replicas, then when the node +hosting it is shut down, allocation will immediately try to relocate the index to a new node +in order to maximize availability. For fully mounted indices this will result in the new node +downloading the entire index snapshot from the cloud repository. Under a rolling cluster restart, +this may happen multiple times for each searchable snapshot index. Temporarily +disabling allocation during planned node restart will prevent this, as described in +the <>. + [discrete] [[back-up-restore-searchable-snapshots]] === Back up and restore {search-snaps} diff --git a/docs/reference/security/authorization/built-in-roles.asciidoc b/docs/reference/security/authorization/built-in-roles.asciidoc index 6db08b307f193..d730587e7db17 100644 --- a/docs/reference/security/authorization/built-in-roles.asciidoc +++ b/docs/reference/security/authorization/built-in-roles.asciidoc @@ -14,11 +14,6 @@ roles have a fixed set of privileges and cannot be updated. Grants access necessary for the APM system user to send system-level data (such as monitoring) to {es}. -[[built-in-roles-apm-user]] `apm_user` :: -Grants the privileges required for APM users (such as `read` and -`view_index_metadata` privileges on the `apm-*` and `.ml-anomalies*` indices). -deprecated:[7.13.0,"See {kibana-ref}/apm-app-users.html[APM app users and privileges\] for alternatives."]. - [[built-in-roles-beats-admin]] `beats_admin` :: Grants access to the `.management-beats` index, which contains configuration information for the Beats. diff --git a/docs/reference/security/fips-140-compliance.asciidoc b/docs/reference/security/fips-140-compliance.asciidoc index 5bf73d43541d6..dec17927e62b8 100644 --- a/docs/reference/security/fips-140-compliance.asciidoc +++ b/docs/reference/security/fips-140-compliance.asciidoc @@ -53,8 +53,8 @@ https://docs.oracle.com/en/java/javase/17/security/java-cryptography-architectur https://docs.oracle.com/en/java/javase/17/security/java-secure-socket-extension-jsse-reference-guide.html[JSSE] implementation is required so that the JVM uses FIPS validated implementations of NIST recommended cryptographic algorithms. -Elasticsearch has been tested with Bouncy Castle's https://repo1.maven.org/maven2/org/bouncycastle/bc-fips/1.0.2.4/bc-fips-1.0.2.4.jar[bc-fips 1.0.2.4] -and https://repo1.maven.org/maven2/org/bouncycastle/bctls-fips/1.0.17/bctls-fips-1.0.17.jar[bctls-fips 1.0.17]. +Elasticsearch has been tested with Bouncy Castle's https://repo1.maven.org/maven2/org/bouncycastle/bc-fips/1.0.2.5/bc-fips-1.0.2.5.jar[bc-fips 1.0.2.5] +and https://repo1.maven.org/maven2/org/bouncycastle/bctls-fips/1.0.19/bctls-fips-1.0.19.jar[bctls-fips 1.0.19]. Please refer to the {es} https://www.elastic.co/support/matrix#matrix_jvm[JVM support matrix] for details on which combinations of JVM and security provider are supported in FIPS mode. Elasticsearch does not ship with a FIPS certified provider. It is the responsibility of the user to install and configure the security provider to ensure compliance with FIPS 140-2. 
Using a FIPS certified provider will ensure that only diff --git a/docs/reference/settings/notification-settings.asciidoc b/docs/reference/settings/notification-settings.asciidoc index 145112ef4d27c..c375ddf076a66 100644 --- a/docs/reference/settings/notification-settings.asciidoc +++ b/docs/reference/settings/notification-settings.asciidoc @@ -118,6 +118,17 @@ If you configure multiple email accounts, you must either configure this setting or specify the email account to use in the <> action. See <>. +`xpack.notification.email.recipient_allowlist`:: +(<>) +Specifies addresses to which emails are allowed to be sent. +Emails with recipients (`To:`, `Cc:`, or `Bcc:`) outside of these patterns will be rejected and an +error thrown. This setting defaults to `["*"]` which means all recipients are allowed. +Simple globbing is supported, such as `list-*@company.com` in the list of allowed recipients. + +NOTE: This setting can't be used at the same time as `xpack.notification.email.account.domain_allowlist` +and an error will be thrown if both are set at the same time. This setting can be used to specify domains +to allow by using a wildcard pattern such as `*@company.com`. + `xpack.notification.email.account`:: Specifies account information for sending notifications via email. You can specify the following email account attributes: @@ -129,6 +140,10 @@ Specifies domains to which emails are allowed to be sent. Emails with recipients `Bcc:`) outside of these domains will be rejected and an error thrown. This setting defaults to `["*"]` which means all domains are allowed. Simple globbing is supported, such as `*.company.com` in the list of allowed domains. + +NOTE: This setting can't be used at the same time as `xpack.notification.email.recipient_allowlist` +and an error will be thrown if both are set at the same time. + -- [[email-account-attributes]] diff --git a/docs/reference/snapshot-restore/repository-azure.asciidoc b/docs/reference/snapshot-restore/repository-azure.asciidoc index 0e6e1478cfc55..50dc42ac9163d 100644 --- a/docs/reference/snapshot-restore/repository-azure.asciidoc +++ b/docs/reference/snapshot-restore/repository-azure.asciidoc @@ -181,7 +181,7 @@ is running. When running {es} in https://azure.microsoft.com/en-gb/products/kubernetes-service[Azure Kubernetes -Service], for instance using {eck-ref}[{eck}], you should use +Service], for instance using {eck-ref}/k8s-snapshots.html#k8s-azure-workload-identity[{eck}], you should use https://azure.github.io/azure-workload-identity/docs/introduction.html[Azure Workload Identity] to provide credentials to {es}. To use Azure Workload Identity, mount the `azure-identity-token` volume as a subdirectory of the diff --git a/docs/reference/snapshot-restore/repository-s3.asciidoc b/docs/reference/snapshot-restore/repository-s3.asciidoc index 36f311b1cdd97..9b71fe9220385 100644 --- a/docs/reference/snapshot-restore/repository-s3.asciidoc +++ b/docs/reference/snapshot-restore/repository-s3.asciidoc @@ -6,6 +6,9 @@ You can use AWS S3 as a repository for {ref}/snapshot-restore.html[Snapshot/Rest *If you are looking for a hosted solution of Elasticsearch on AWS, please visit https://www.elastic.co/cloud/.* +See https://www.youtube.com/watch?v=ACqfyzWf-xs[this video] +for a walkthrough of connecting an AWS S3 repository. + [[repository-s3-usage]] ==== Getting started @@ -35,7 +38,8 @@ PUT _snapshot/my_s3_repository The client that you use to connect to S3 has a number of settings available. 
The settings have the form `s3.client.CLIENT_NAME.SETTING_NAME`. By default, `s3` repositories use a client named `default`, but this can be modified using -the <> `client`. For example: +the <> `client`. For example, to +use a client named `my-alternate-client`, register the repository as follows: [source,console] ---- @@ -66,10 +70,19 @@ bin/elasticsearch-keystore add s3.client.default.secret_key bin/elasticsearch-keystore add s3.client.default.session_token ---- -If instead you want to use the instance role or container role to access S3 -then you should leave these settings unset. You can switch from using specific -credentials back to the default of using the instance role or container role by -removing these settings from the keystore as follows: +If you do not configure these settings then {es} will attempt to automatically +obtain credentials from the environment in which it is running: + +* Nodes running on an instance in AWS EC2 will attempt to use the EC2 Instance + Metadata Service (IMDS) to obtain instance role credentials. {es} supports + both IMDS version 1 and IMDS version 2. + +* Nodes running in a container in AWS ECS and AWS EKS will attempt to obtain + container role credentials similarly. + +You can switch from using specific credentials back to the default of using the +instance role or container role by removing these settings from the keystore as +follows: [source,sh] ---- @@ -79,20 +92,14 @@ bin/elasticsearch-keystore remove s3.client.default.secret_key bin/elasticsearch-keystore remove s3.client.default.session_token ---- -*All* client secure settings of this repository type are -{ref}/secure-settings.html#reloadable-secure-settings[reloadable]. -You can define these settings before the node is started, -or call the <> -after the settings are defined to apply them to a running node. - -After you reload the settings, the internal `s3` clients, used to transfer the snapshot -contents, will utilize the latest settings from the keystore. Any existing `s3` -repositories, as well as any newly created ones, will pick up the new values -stored in the keystore. - -NOTE: In-progress snapshot/restore tasks will not be preempted by a *reload* of -the client's secure settings. The task will complete using the client as it was -built when the operation started. +Define the relevant secure settings in each node's keystore before starting the +node. The secure settings described here are all +{ref}/secure-settings.html#reloadable-secure-settings[reloadable] so you may +update the keystore contents on each node while the node is running and then +call the <> to apply the updated settings to the nodes in the cluster. After this API +completes, {es} will use the updated setting values for all future snapshot +operations, but ongoing operations may continue to use older setting values. The following list contains the available client settings. 
Those that must be stored in the keystore are marked as "secure" and are *reloadable*; the other diff --git a/docs/reference/troubleshooting/common-issues/diagnose-unassigned-shards.asciidoc b/docs/reference/troubleshooting/common-issues/diagnose-unassigned-shards.asciidoc index fe9422d6d4c53..e1ceefb92bbec 100644 --- a/docs/reference/troubleshooting/common-issues/diagnose-unassigned-shards.asciidoc +++ b/docs/reference/troubleshooting/common-issues/diagnose-unassigned-shards.asciidoc @@ -8,5 +8,6 @@ In order to diagnose the unassigned shards in your deployment use the following include::{es-ref-dir}/tab-widgets/troubleshooting/data/diagnose-unassigned-shards-widget.asciidoc[] - +See https://www.youtube.com/watch?v=v2mbeSd1vTQ[this video] +for a walkthrough of monitoring allocation health. diff --git a/docs/reference/troubleshooting/common-issues/red-yellow-cluster-status.asciidoc b/docs/reference/troubleshooting/common-issues/red-yellow-cluster-status.asciidoc index eb56a37562c31..4289242deb486 100644 --- a/docs/reference/troubleshooting/common-issues/red-yellow-cluster-status.asciidoc +++ b/docs/reference/troubleshooting/common-issues/red-yellow-cluster-status.asciidoc @@ -19,6 +19,8 @@ operate to have a green health status. In many cases, your cluster will recover to green health status automatically. If the cluster doesn't automatically recover, then you must <> the remaining problems so management and cleanup activities can proceed. +See https://www.youtube.com/watch?v=v2mbeSd1vTQ[this video] +for a walkthrough of monitoring allocation health. [discrete] [[diagnose-cluster-status]] @@ -90,6 +92,8 @@ PUT _cluster/settings } ---- +See https://www.youtube.com/watch?v=MiKKUdZvwnI[this video] for a walkthrough of troubleshooting "no allocations are allowed". [discrete] [[fix-cluster-status-recover-nodes]] ===== Recover lost nodes @@ -262,3 +266,5 @@ POST _cluster/reroute ---- // TEST[s/^/PUT my-index\n/] // TEST[catch:bad_request] + +See https://www.youtube.com/watch?v=6OAg9IyXFO4[this video] for a walkthrough of troubleshooting `no_valid_shard_copy`. \ No newline at end of file diff --git a/docs/reference/troubleshooting/diagnostic.asciidoc b/docs/reference/troubleshooting/diagnostic.asciidoc index a944ca88d285d..c6d46b9e94fc8 100644 --- a/docs/reference/troubleshooting/diagnostic.asciidoc +++ b/docs/reference/troubleshooting/diagnostic.asciidoc @@ -13,6 +13,8 @@ This information can be used to troubleshoot problems with your cluster. For exa You can generate diagnostic information using this tool before you contact https://support.elastic.co[Elastic Support] or https://discuss.elastic.co[Elastic Discuss] to minimize turnaround time. +See https://www.youtube.com/watch?v=Bb6SaqhqYHw[this video] for a walkthrough of capturing an {es} diagnostic. + [discrete] [[diagnostic-tool-requirements]] === Requirements diff --git a/docs/reference/vectors/vector-functions.asciidoc b/docs/reference/vectors/vector-functions.asciidoc index 10dca8084e28a..23419e8eb12b1 100644 --- a/docs/reference/vectors/vector-functions.asciidoc +++ b/docs/reference/vectors/vector-functions.asciidoc @@ -336,6 +336,10 @@ When using `bit` vectors, not all the vector functions are available. The suppor this is the sum of the bitwise AND of the two vectors. If providing `float[]` or `byte[]`, which has `dims` number of elements, as a query vector, the `dotProduct` is the sum of the floating point values using the stored `bit` vector as a mask.
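+ +As a rough sketch of the masking behavior (standalone Python, for illustration only; it assumes the +big-endian bit order described in the note below and is not the {es} implementation): + +[source,python] +---- +# Sum the query-vector elements whose corresponding stored bit is set. +def bit_mask_dot_product(stored_bits: bytes, query_vector): +    total = 0.0 +    for i, value in enumerate(query_vector): +        if stored_bits[i // 8] >> (7 - i % 8) & 1:  # big-endian within each byte +            total += value +    return total + +print(bit_mask_dot_product(bytes([0b10100001]), [1, 2, 3, 4, 5, 6, 7, 8]))  # 12.0 +---- +// NOTCONSOLE +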
+NOTE: When comparing `floats` and `bytes` with `bit` vectors, the `bit` vector is treated as a mask in big-endian order. +For example, if the `bit` vector is `10100001` (e.g. the single byte value `161`) and it's compared +with the array of values `[1, 2, 3, 4, 5, 6, 7, 8]`, the `dotProduct` will be `1 + 3 + 8 = 12`. + Here is an example of using dot-product with bit vectors. [source,console] diff --git a/docs/reference/watcher/example-watches/watching-time-series-data.asciidoc b/docs/reference/watcher/example-watches/watching-time-series-data.asciidoc index 421c69619cfea..b1c776baae1de 100644 --- a/docs/reference/watcher/example-watches/watching-time-series-data.asciidoc +++ b/docs/reference/watcher/example-watches/watching-time-series-data.asciidoc @@ -62,20 +62,20 @@ contain the words "error" or "problem". To set up the watch: -. Define the watch trigger--a daily schedule that runs at 12:00 UTC: +. Define the watch trigger--a daily schedule that runs at 12:00 Australian Eastern Standard Time (UTC+10:00): + [source,js] -------------------------------------------------- "trigger" : { "schedule" : { + "timezone": "Australia/Brisbane", "daily" : { "at" : "12:00" } } } -------------------------------------------------- + -NOTE: In {watcher}, you specify times in UTC time. Don't forget to do the - conversion from your local time so the schedule triggers at the time - you intend. +NOTE: In {watcher}, if the timezone is omitted then schedules default to UTC. `timezone` can be specified either +as a +/-HH:mm offset from UTC or as a timezone name from the machine's local IANA Time Zone Database. . Define the watch input--a search that uses a filter to constrain the results to the past day. diff --git a/docs/reference/watcher/trigger/schedule.asciidoc b/docs/reference/watcher/trigger/schedule.asciidoc index fa389409d15c4..d2bf466644e10 100644 --- a/docs/reference/watcher/trigger/schedule.asciidoc +++ b/docs/reference/watcher/trigger/schedule.asciidoc @@ -6,12 +6,42 @@ ++++ Schedule <> define when the watch execution should start based -on date and time. All times are specified in UTC time. +on date and time. All times are in UTC unless a timezone is explicitly specified +in the schedule. {watcher} uses the system clock to determine the current time. To ensure schedules are triggered when expected, you should synchronize the clocks of all nodes in the cluster using a time service such as http://www.ntp.org/[NTP]. +NOTE: {watcher} can't correct for manual adjustments to the system clock. Be aware when making +such changes that watch execution may be affected, with watches being skipped or repeated, if the +adjustment covers their target execution time. This applies to changes made via NTP as well. + +When specifying a timezone for a watch, keep in mind the effect daylight saving time +transitions may have on the schedule, especially if the watch is scheduled to run +during the transition. Here's how {watcher} handles watches scheduled during discontinuities: + +==== Gap Transitions +These occur when the clock moves forward, such as when daylight saving time starts, +and cause certain hours or minutes to be skipped. If your watch is scheduled to run +during a gap transition, the watch is executed at the same time as before the transition. + +Example: If a watch is scheduled to run daily at 1:30AM in the `Europe/London` time zone and +the clock moves forward one hour from 1:00AM (GMT+0) to 2:00AM (GMT+1), the watch is executed +at 2:30AM (GMT+1), which would have been 1:30AM before the transition.
Subsequent executions +happen at 1:30AM (GMT+1). + +==== Overlap Transitions +These occur when the clock moves backward, such as when daylight saving time ends, +and cause certain hours or minutes to be repeated. If your watch is scheduled to run +during an overlap transition, only the first occurrence of the time causes the watch +to execute, with the second being skipped. + +Example: If a watch is scheduled to run at 1:30 AM and the clock moves backward one hour +from 2:00AM to 1:00AM, the watch is executed at 1:30AM and the second occurrence after the +change is skipped. + +=== Throttling Keep in mind that the throttle period can affect when a watch is actually executed. The default throttle period is five seconds (5000 ms). If you configure a schedule that's more frequent than the throttle period, the throttle period overrides the @@ -20,6 +50,7 @@ and set the schedule to every 10 seconds, the watch is executed no more than once per minute. For more information about throttling, see <>. +=== Schedule Types {watcher} provides several types of schedule triggers: * <> diff --git a/docs/reference/watcher/trigger/schedule/cron.asciidoc b/docs/reference/watcher/trigger/schedule/cron.asciidoc index 673f350435c5f..c33bf524a8737 100644 --- a/docs/reference/watcher/trigger/schedule/cron.asciidoc +++ b/docs/reference/watcher/trigger/schedule/cron.asciidoc @@ -5,14 +5,14 @@ ++++ -Defines a <> using a <> +Defines a <> using a <> that specifies when to execute a watch. -TIP: While cron expressions are powerful, a regularly occurring schedule -is easier to configure with the other schedule types. -If you must use a cron schedule, make sure you verify it with -<> . +TIP: While cron expressions are powerful, a regularly occurring schedule +is easier to configure with the other schedule types. +If you must use a cron schedule, make sure you verify it with +<> . ===== Configure a cron schedule with one time @@ -60,16 +60,40 @@ minute during the weekend: -------------------------------------------------- // NOTCONSOLE +[[configure_cron_time-zone]] +==== Use a different time zone for a cron schedule +By default, cron expressions are evaluated in the UTC time zone. To use a different time zone, +you can specify the `timezone` parameter in the schedule. For example, the following +`cron` schedule triggers at 6:00 AM and 6:00 PM during weekends in the `America/Los_Angeles` time zone: + + +[source,js] +-------------------------------------------------- +{ + ... + "trigger" : { + "schedule" : { + "timezone" : "America/Los_Angeles", + "cron" : [ + "0 0 6,18 ? * SAT-SUN" + ] + } + } + ... +} +-------------------------------------------------- +// NOTCONSOLE + [[croneval]] ===== Use croneval to validate cron expressions -{es} provides a <> command line tool -in the `$ES_HOME/bin` directory that you can use to check that your cron expressions +{es} provides a <> command line tool +in the `$ES_HOME/bin` directory that you can use to check that your cron expressions are valid and produce the expected results. -To validate a cron expression, pass it in as a parameter to `elasticsearch-croneval`: +To validate a cron expression, pass it in as a parameter to `elasticsearch-croneval`: [source,bash] -------------------------------------------------- bin/elasticsearch-croneval "0 0/1 * * * ?"
--------------------------------------------------- +-------------------------------------------------- diff --git a/docs/reference/watcher/trigger/schedule/daily.asciidoc b/docs/reference/watcher/trigger/schedule/daily.asciidoc index cea2b8316e02f..d258d9c612350 100644 --- a/docs/reference/watcher/trigger/schedule/daily.asciidoc +++ b/docs/reference/watcher/trigger/schedule/daily.asciidoc @@ -97,3 +97,27 @@ or minutes as an array. For example, following `daily` schedule triggers at } -------------------------------------------------- // NOTCONSOLE + +[[specifying-time-zone-for-daily-schedule]] +===== Specifying a time zone for a daily schedule +By default, daily schedules are evaluated in the UTC time zone. To use a different time zone, +you can specify the `timezone` parameter in the schedule. For example, the following +`daily` schedule triggers at 6:00 AM and 6:00 PM in the `Pacific/Galapagos` time zone: + +[source,js] +-------------------------------------------------- +{ + "trigger" : { + "schedule" : { + "timezone" : "Pacific/Galapagos", + "daily" : { + "at" : { + "hour" : [ 6, 18 ], + "minute" : 0 + } + } + } + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/watcher/trigger/schedule/monthly.asciidoc b/docs/reference/watcher/trigger/schedule/monthly.asciidoc index 7d13262ed2fa8..694c76aaee23a 100644 --- a/docs/reference/watcher/trigger/schedule/monthly.asciidoc +++ b/docs/reference/watcher/trigger/schedule/monthly.asciidoc @@ -74,4 +74,26 @@ schedule triggers at 12:00 AM and 12:00 PM on the 10th and 20th of each month. } } -------------------------------------------------- -// NOTCONSOLE \ No newline at end of file +// NOTCONSOLE + +==== Configuring time zones for monthly schedules +By default, monthly schedules are evaluated in the UTC time zone. To use a different +time zone, you can specify the `timezone` parameter in the schedule. For example, +the following `monthly` schedule triggers at 6:00 AM and 6:00 PM on the 15th of each month in +the `Asia/Tokyo` time zone: + +[source,js] +-------------------------------------------------- +{ + "trigger" : { + "schedule" : { + "timezone" : "Asia/Tokyo", + "monthly" : { + "on" : [ 15 ], + "at" : [ "6:00", "18:00" ] + } + } + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/watcher/trigger/schedule/weekly.asciidoc b/docs/reference/watcher/trigger/schedule/weekly.asciidoc index 5b43de019ad25..53bd2f3167b21 100644 --- a/docs/reference/watcher/trigger/schedule/weekly.asciidoc +++ b/docs/reference/watcher/trigger/schedule/weekly.asciidoc @@ -79,4 +79,26 @@ Alternatively, you can specify days and times in an object that has `on` and } } -------------------------------------------------- -// NOTCONSOLE \ No newline at end of file +// NOTCONSOLE + +==== Use a different time zone for a weekly schedule +By default, weekly schedules are evaluated in the UTC time zone. To use a different time zone, +you can specify the `timezone` parameter in the schedule.
For example, the following +`weekly` schedule triggers at 6:00 AM and 6:00 PM on Tuesdays and Fridays in the +`America/Buenos_Aires` time zone: + +[source,js] +-------------------------------------------------- +{ + "trigger" : { + "schedule" : { + "timezone" : "America/Buenos_Aires", + "weekly" : { + "on" : [ "tuesday", "friday" ], + "at" : [ "6:00", "18:00" ] + } + } + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/watcher/trigger/schedule/yearly.asciidoc b/docs/reference/watcher/trigger/schedule/yearly.asciidoc index 8fce024bf9f4a..c33321ef5a7dc 100644 --- a/docs/reference/watcher/trigger/schedule/yearly.asciidoc +++ b/docs/reference/watcher/trigger/schedule/yearly.asciidoc @@ -88,3 +88,26 @@ January 20th, December 10th, and December 20th. } -------------------------------------------------- // NOTCONSOLE + +==== Configuring a yearly schedule with a different time zone +By default, the `yearly` schedule is evaluated in the UTC time zone. To use a +different time zone, you can specify the `timezone` parameter in the schedule. +For example, the following `yearly` schedule triggers at 3:30 PM and 8:30 PM +on June 4th in the `Antarctica/Troll` time zone: + +[source,js] +-------------------------------------------------- +{ + "trigger" : { + "schedule" : { + "timezone" : "Antarctica/Troll", + "yearly" : { + "in" : "june", + "on" : 4, + "at" : [ "15:30", "20:30" ] + } + } + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/gradle.properties b/gradle.properties index 745fb4f9e51ae..aa38a61ab0057 100644 --- a/gradle.properties +++ b/gradle.properties @@ -2,7 +2,7 @@ org.gradle.welcome=never org.gradle.warning.mode=none org.gradle.parallel=true # We need to declare --add-exports to make spotless working seamlessly with jdk16 -org.gradle.jvmargs=-XX:+HeapDumpOnOutOfMemoryError -Xss2m --add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED +org.gradle.jvmargs=-XX:+HeapDumpOnOutOfMemoryError -Xss2m --add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED --add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED --add-opens java.base/java.time=ALL-UNNAMED # Enforce the build to fail on deprecated gradle api usage systemProp.org.gradle.warning.mode=fail diff --git a/gradle/build.versions.toml b/gradle/build.versions.toml index d11c4b7fd9c91..05fda8e0244de 100644 --- a/gradle/build.versions.toml +++ b/gradle/build.versions.toml @@ -1,5 +1,5 @@ [versions] -asm = "9.6" +asm = "9.7.1" jackson = "2.15.0" junit5 = "5.8.1" spock = "2.1-groovy-3.0" @@ -16,7 +16,7 @@ checkstyle = "com.puppycrawl.tools:checkstyle:10.3" commons-codec = "commons-codec:commons-codec:1.11" commmons-io = "commons-io:commons-io:2.2" docker-compose = "com.avast.gradle:gradle-docker-compose-plugin:0.17.5" -forbiddenApis = "de.thetaphi:forbiddenapis:3.6" +forbiddenApis = "de.thetaphi:forbiddenapis:3.8" gradle-enterprise = "com.gradle:develocity-gradle-plugin:3.18.1" hamcrest = "org.hamcrest:hamcrest:2.1" httpcore = "org.apache.httpcomponents:httpcore:4.4.12" @@ -39,7 +39,7 @@ maven-model =
"org.apache.maven:maven-model:3.6.2" mockito-core = "org.mockito:mockito-core:1.9.5" nebula-info = "com.netflix.nebula:gradle-info-plugin:11.3.3" reflections = "org.reflections:reflections:0.9.12" -shadow-plugin = "com.github.breskeby:shadow:3b035f2" +shadow-plugin = "com.gradleup.shadow:shadow-gradle-plugin:8.3.5" snakeyaml = { group = "org.yaml", name = "snakeyaml", version = { strictly = "2.0" } } spock-core = { group = "org.spockframework", name="spock-core", version.ref="spock" } spock-junit4 = { group = "org.spockframework", name="spock-junit4", version.ref="spock" } diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 2b8f1b2a09ad9..37178fd9439d0 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -219,6 +219,11 @@ + + + + + @@ -234,16 +239,31 @@ + + + + + + + + + + + + + + + @@ -414,21 +434,16 @@ + + + + + - - - - - - - - - - @@ -614,6 +629,11 @@ + + + + + @@ -799,6 +819,11 @@ + + + + + @@ -919,6 +944,11 @@ + + + + + @@ -1176,6 +1206,11 @@ + + + + + @@ -1196,6 +1231,11 @@ + + + + + @@ -1226,16 +1266,21 @@ - - - - - + + + + + + + + + + @@ -1301,6 +1346,11 @@ + + + + + @@ -1311,6 +1361,11 @@ + + + + + @@ -1922,6 +1977,11 @@ + + + + + @@ -1937,6 +1997,11 @@ + + + + + @@ -2282,6 +2347,11 @@ + + + + + @@ -2312,14 +2382,14 @@ - - - + + + - - - + + + @@ -2354,9 +2424,9 @@ - - - + + + @@ -2810,6 +2880,11 @@ + + + + + @@ -2830,6 +2905,11 @@ + + + + + @@ -2965,6 +3045,21 @@ + + + + + + + + + + + + + + + @@ -2980,6 +3075,11 @@ + + + + + @@ -3020,6 +3120,11 @@ + + + + + @@ -3045,6 +3150,11 @@ + + + + + @@ -3203,14 +3313,14 @@ - - - + + + - - - + + + @@ -3248,9 +3358,9 @@ - - - + + + @@ -3353,6 +3463,11 @@ + + + + + @@ -3368,6 +3483,16 @@ + + + + + + + + + + @@ -3393,6 +3518,16 @@ + + + + + + + + + + @@ -3423,11 +3558,21 @@ + + + + + + + + + + @@ -3533,6 +3678,11 @@ + + + + + @@ -3548,66 +3698,131 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -3618,11 +3833,21 @@ + + + + + + + + + + @@ -3653,6 +3878,11 @@ + + + + + @@ -3678,6 +3908,11 @@ + + + + + @@ -4153,6 +4388,11 @@ + + + + + @@ -4198,6 +4438,11 @@ + + + + + @@ -4238,6 +4483,11 @@ + + + + + @@ -4418,11 +4668,21 @@ + + + + + + + + + + diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 6acc1431eaec1..22286c90de3d1 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=2ab88d6de2c23e6adae7363ae6e29cbdd2a709e992929b48b6530fd0c7133bd6 -distributionUrl=https\://services.gradle.org/distributions/gradle-8.10.2-all.zip +distributionSha256Sum=89d4e70e4e84e2d2dfbb63e4daa53e21b25017cc70c37e4eea31ee51fb15098a +distributionUrl=https\://services.gradle.org/distributions/gradle-8.11.1-all.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/libs/core/build.gradle b/libs/core/build.gradle index e24417e09a53d..99c22620e7354 100644 --- a/libs/core/build.gradle +++ b/libs/core/build.gradle @@ -8,7 +8,6 @@ */ apply plugin: 'elasticsearch.publish' -apply plugin: 'elasticsearch.mrjar' dependencies { // This dependency is used only by :libs:core for null-checking interop with other tools diff --git a/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImpl.java 
b/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImpl.java index f5fe8d41c2243..9e23d2c0412c3 100644 --- a/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImpl.java +++ b/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImpl.java @@ -9,33 +9,120 @@ package org.elasticsearch.entitlement.instrumentation.impl; +import org.elasticsearch.entitlement.instrumentation.CheckMethod; import org.elasticsearch.entitlement.instrumentation.InstrumentationService; import org.elasticsearch.entitlement.instrumentation.Instrumenter; import org.elasticsearch.entitlement.instrumentation.MethodKey; +import org.objectweb.asm.ClassReader; +import org.objectweb.asm.ClassVisitor; +import org.objectweb.asm.MethodVisitor; +import org.objectweb.asm.Opcodes; import org.objectweb.asm.Type; -import java.lang.reflect.Method; -import java.lang.reflect.Modifier; +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; import java.util.Map; -import java.util.stream.Stream; public class InstrumentationServiceImpl implements InstrumentationService { + @Override - public Instrumenter newInstrumenter(String classNameSuffix, Map instrumentationMethods) { - return new InstrumenterImpl(classNameSuffix, instrumentationMethods); + public Instrumenter newInstrumenter(Map checkMethods) { + return InstrumenterImpl.create(checkMethods); } - /** - * @return a {@link MethodKey} suitable for looking up the given {@code targetMethod} in the entitlements trampoline - */ - public MethodKey methodKeyForTarget(Method targetMethod) { - Type actualType = Type.getMethodType(Type.getMethodDescriptor(targetMethod)); - return new MethodKey( - Type.getInternalName(targetMethod.getDeclaringClass()), - targetMethod.getName(), - Stream.of(actualType.getArgumentTypes()).map(Type::getInternalName).toList(), - Modifier.isStatic(targetMethod.getModifiers()) - ); + @Override + public Map lookupMethodsToInstrument(String entitlementCheckerClassName) throws ClassNotFoundException, + IOException { + var methodsToInstrument = new HashMap(); + var checkerClass = Class.forName(entitlementCheckerClassName); + var classFileInfo = InstrumenterImpl.getClassFileInfo(checkerClass); + ClassReader reader = new ClassReader(classFileInfo.bytecodes()); + ClassVisitor visitor = new ClassVisitor(Opcodes.ASM9) { + @Override + public MethodVisitor visitMethod( + int access, + String checkerMethodName, + String checkerMethodDescriptor, + String signature, + String[] exceptions + ) { + var mv = super.visitMethod(access, checkerMethodName, checkerMethodDescriptor, signature, exceptions); + + var checkerMethodArgumentTypes = Type.getArgumentTypes(checkerMethodDescriptor); + var methodToInstrument = parseCheckerMethodSignature(checkerMethodName, checkerMethodArgumentTypes); + + var checkerParameterDescriptors = Arrays.stream(checkerMethodArgumentTypes).map(Type::getDescriptor).toList(); + var checkMethod = new CheckMethod(Type.getInternalName(checkerClass), checkerMethodName, checkerParameterDescriptors); + + methodsToInstrument.put(methodToInstrument, checkMethod); + + return mv; + } + }; + reader.accept(visitor, 0); + return methodsToInstrument; } + private static final Type CLASS_TYPE = Type.getType(Class.class); + + static MethodKey parseCheckerMethodSignature(String checkerMethodName, Type[] 
checkerMethodArgumentTypes) { + var classNameStartIndex = checkerMethodName.indexOf('$'); + var classNameEndIndex = checkerMethodName.lastIndexOf('$'); + + if (classNameStartIndex == -1 || classNameStartIndex >= classNameEndIndex) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "Checker method %s has incorrect name format. " + + "It should be either check$$methodName (instance), check$package_ClassName$methodName (static) or " + + "check$package_ClassName$ (ctor)", + checkerMethodName + ) + ); + } + + // No "className" (check$$methodName) -> method is instance, and we'll get the class from the actual typed argument + final boolean targetMethodIsStatic = classNameStartIndex + 1 != classNameEndIndex; + // No "methodName" (check$package_ClassName$) -> method is ctor + final boolean targetMethodIsCtor = classNameEndIndex + 1 == checkerMethodName.length(); + final String targetMethodName = targetMethodIsCtor ? "<init>" : checkerMethodName.substring(classNameEndIndex + 1); + + final String targetClassName; + final List targetParameterTypes; + if (targetMethodIsStatic) { + if (checkerMethodArgumentTypes.length < 1 || CLASS_TYPE.equals(checkerMethodArgumentTypes[0]) == false) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "Checker method %s has incorrect argument types. " + "It must have a first argument of Class type.", + checkerMethodName + ) + ); + } + + targetClassName = checkerMethodName.substring(classNameStartIndex + 1, classNameEndIndex).replace('_', '/'); + targetParameterTypes = Arrays.stream(checkerMethodArgumentTypes).skip(1).map(Type::getInternalName).toList(); + } else { + if (checkerMethodArgumentTypes.length < 2 + || CLASS_TYPE.equals(checkerMethodArgumentTypes[0]) == false + || checkerMethodArgumentTypes[1].getSort() != Type.OBJECT) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "Checker method %s has incorrect argument types. 
" + + "It must have a first argument of Class type, and a second argument of the class containing the method to " + + "instrument", + checkerMethodName + ) + ); + } + var targetClassType = checkerMethodArgumentTypes[1]; + targetClassName = targetClassType.getInternalName(); + targetParameterTypes = Arrays.stream(checkerMethodArgumentTypes).skip(2).map(Type::getInternalName).toList(); + } + return new MethodKey(targetClassName, targetMethodName, targetParameterTypes); + } } diff --git a/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java b/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java index 63c9ccd80be70..57e30c01c5c28 100644 --- a/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java +++ b/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java @@ -9,6 +9,7 @@ package org.elasticsearch.entitlement.instrumentation.impl; +import org.elasticsearch.entitlement.instrumentation.CheckMethod; import org.elasticsearch.entitlement.instrumentation.Instrumenter; import org.elasticsearch.entitlement.instrumentation.MethodKey; import org.objectweb.asm.AnnotationVisitor; @@ -23,7 +24,6 @@ import java.io.IOException; import java.io.InputStream; -import java.lang.reflect.Method; import java.util.Map; import java.util.stream.Stream; @@ -36,15 +36,44 @@ import static org.objectweb.asm.Opcodes.INVOKEVIRTUAL; public class InstrumenterImpl implements Instrumenter { + + private final String getCheckerClassMethodDescriptor; + private final String handleClass; + /** * To avoid class name collisions during testing without an agent to replace classes in-place. 
*/ private final String classNameSuffix; - private final Map instrumentationMethods; - - public InstrumenterImpl(String classNameSuffix, Map instrumentationMethods) { + private final Map checkMethods; + + InstrumenterImpl( + String handleClass, + String getCheckerClassMethodDescriptor, + String classNameSuffix, + Map checkMethods + ) { + this.handleClass = handleClass; + this.getCheckerClassMethodDescriptor = getCheckerClassMethodDescriptor; this.classNameSuffix = classNameSuffix; - this.instrumentationMethods = instrumentationMethods; + this.checkMethods = checkMethods; + } + + static String getCheckerClassName() { + int javaVersion = Runtime.version().feature(); + final String classNamePrefix; + if (javaVersion >= 23) { + classNamePrefix = "Java23"; + } else { + classNamePrefix = ""; + } + return "org/elasticsearch/entitlement/bridge/" + classNamePrefix + "EntitlementChecker"; + } + + public static InstrumenterImpl create(Map checkMethods) { + String checkerClass = getCheckerClassName(); + String handleClass = checkerClass + "Handle"; + String getCheckerClassMethodDescriptor = Type.getMethodDescriptor(Type.getObjectType(checkerClass)); + return new InstrumenterImpl(handleClass, getCheckerClassMethodDescriptor, "", checkMethods); } public ClassFileInfo instrumentClassFile(Class clazz) throws IOException { @@ -138,16 +167,12 @@ public MethodVisitor visitMethod(int access, String name, String descriptor, Str var mv = super.visitMethod(access, name, descriptor, signature, exceptions); if (isAnnotationPresent == false) { boolean isStatic = (access & ACC_STATIC) != 0; - var key = new MethodKey( - className, - name, - Stream.of(Type.getArgumentTypes(descriptor)).map(Type::getInternalName).toList(), - isStatic - ); - var instrumentationMethod = instrumentationMethods.get(key); + boolean isCtor = "<init>".equals(name); + var key = new MethodKey(className, name, Stream.of(Type.getArgumentTypes(descriptor)).map(Type::getInternalName).toList()); + var instrumentationMethod = checkMethods.get(key); if (instrumentationMethod != null) { // LOGGER.debug("Will instrument method {}", key); - return new EntitlementMethodVisitor(Opcodes.ASM9, mv, isStatic, descriptor, instrumentationMethod); + return new EntitlementMethodVisitor(Opcodes.ASM9, mv, isStatic, isCtor, descriptor, instrumentationMethod); } else { // LOGGER.trace("Will not instrument method {}", key); } @@ -176,21 +201,24 @@ private void addClassAnnotationIfNeeded() { class EntitlementMethodVisitor extends MethodVisitor { private final boolean instrumentedMethodIsStatic; + private final boolean instrumentedMethodIsCtor; private final String instrumentedMethodDescriptor; - private final Method instrumentationMethod; + private final CheckMethod checkMethod; private boolean hasCallerSensitiveAnnotation = false; EntitlementMethodVisitor( int api, MethodVisitor methodVisitor, boolean instrumentedMethodIsStatic, + boolean instrumentedMethodIsCtor, String instrumentedMethodDescriptor, - Method instrumentationMethod + CheckMethod checkMethod ) { super(api, methodVisitor); this.instrumentedMethodIsStatic = instrumentedMethodIsStatic; + this.instrumentedMethodIsCtor = instrumentedMethodIsCtor; this.instrumentedMethodDescriptor = instrumentedMethodDescriptor; - this.instrumentationMethod = instrumentationMethod; + this.checkMethod = checkMethod; } @Override @@ -249,35 +277,33 @@ private void pushCallerClass() { private void forwardIncomingArguments() { int localVarIndex = 0; - if (instrumentedMethodIsStatic == false) { +
localVarIndex++; + } else if (instrumentedMethodIsStatic == false) { mv.visitVarInsn(Opcodes.ALOAD, localVarIndex++); } for (Type type : Type.getArgumentTypes(instrumentedMethodDescriptor)) { mv.visitVarInsn(type.getOpcode(Opcodes.ILOAD), localVarIndex); localVarIndex += type.getSize(); } - } private void invokeInstrumentationMethod() { mv.visitMethodInsn( INVOKEINTERFACE, - Type.getInternalName(instrumentationMethod.getDeclaringClass()), - instrumentationMethod.getName(), - Type.getMethodDescriptor(instrumentationMethod), + checkMethod.className(), + checkMethod.methodName(), + Type.getMethodDescriptor( + Type.VOID_TYPE, + checkMethod.parameterDescriptors().stream().map(Type::getType).toArray(Type[]::new) + ), true ); } } protected void pushEntitlementChecker(MethodVisitor mv) { - mv.visitMethodInsn( - INVOKESTATIC, - "org/elasticsearch/entitlement/bridge/EntitlementCheckerHandle", - "instance", - "()Lorg/elasticsearch/entitlement/bridge/EntitlementChecker;", - false - ); + mv.visitMethodInsn(INVOKESTATIC, handleClass, "instance", getCheckerClassMethodDescriptor, false); } public record ClassFileInfo(String fileName, byte[] bytecodes) {} diff --git a/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests.java b/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests.java new file mode 100644 index 0000000000000..9ccb72637d463 --- /dev/null +++ b/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests.java @@ -0,0 +1,318 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.entitlement.instrumentation.impl; + +import org.elasticsearch.entitlement.instrumentation.CheckMethod; +import org.elasticsearch.entitlement.instrumentation.InstrumentationService; +import org.elasticsearch.entitlement.instrumentation.MethodKey; +import org.elasticsearch.test.ESTestCase; +import org.objectweb.asm.Type; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; + +@ESTestCase.WithoutSecurityManager +public class InstrumentationServiceImplTests extends ESTestCase { + + final InstrumentationService instrumentationService = new InstrumentationServiceImpl(); + + static class TestTargetClass {} + + interface TestChecker { + void check$org_example_TestTargetClass$staticMethod(Class clazz, int arg0, String arg1, Object arg2); + + void check$$instanceMethodNoArgs(Class clazz, TestTargetClass that); + + void check$$instanceMethodWithArgs(Class clazz, TestTargetClass that, int x, int y); + } + + interface TestCheckerOverloads { + void check$org_example_TestTargetClass$staticMethodWithOverload(Class clazz, int x, int y); + + void check$org_example_TestTargetClass$staticMethodWithOverload(Class clazz, int x, String y); + } + + interface TestCheckerCtors { + void check$org_example_TestTargetClass$(Class clazz); + + void check$org_example_TestTargetClass$(Class clazz, int x, String y); + } + + public void testInstrumentationTargetLookup() throws IOException, ClassNotFoundException { + Map checkMethods = instrumentationService.lookupMethodsToInstrument(TestChecker.class.getName()); + + assertThat(checkMethods, aMapWithSize(3)); + assertThat( + checkMethods, + hasEntry( + equalTo(new MethodKey("org/example/TestTargetClass", "staticMethod", List.of("I", "java/lang/String", "java/lang/Object"))), + equalTo( + new CheckMethod( + "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestChecker", + "check$org_example_TestTargetClass$staticMethod", + List.of("Ljava/lang/Class;", "I", "Ljava/lang/String;", "Ljava/lang/Object;") + ) + ) + ) + ); + assertThat( + checkMethods, + hasEntry( + equalTo( + new MethodKey( + "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestTargetClass", + "instanceMethodNoArgs", + List.of() + ) + ), + equalTo( + new CheckMethod( + "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestChecker", + "check$$instanceMethodNoArgs", + List.of( + "Ljava/lang/Class;", + "Lorg/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestTargetClass;" + ) + ) + ) + ) + ); + assertThat( + checkMethods, + hasEntry( + equalTo( + new MethodKey( + "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestTargetClass", + "instanceMethodWithArgs", + List.of("I", "I") + ) + ), + equalTo( + new CheckMethod( + "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestChecker", + "check$$instanceMethodWithArgs", + List.of( + "Ljava/lang/Class;", + "Lorg/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestTargetClass;", + "I", + "I" + ) + ) + ) + ) + ); + } + + public void testInstrumentationTargetLookupWithOverloads() throws IOException, ClassNotFoundException { + Map checkMethods = 
instrumentationService.lookupMethodsToInstrument(TestCheckerOverloads.class.getName()); + + assertThat(checkMethods, aMapWithSize(2)); + assertThat( + checkMethods, + hasEntry( + equalTo(new MethodKey("org/example/TestTargetClass", "staticMethodWithOverload", List.of("I", "java/lang/String"))), + equalTo( + new CheckMethod( + "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestCheckerOverloads", + "check$org_example_TestTargetClass$staticMethodWithOverload", + List.of("Ljava/lang/Class;", "I", "Ljava/lang/String;") + ) + ) + ) + ); + assertThat( + checkMethods, + hasEntry( + equalTo(new MethodKey("org/example/TestTargetClass", "staticMethodWithOverload", List.of("I", "I"))), + equalTo( + new CheckMethod( + "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestCheckerOverloads", + "check$org_example_TestTargetClass$staticMethodWithOverload", + List.of("Ljava/lang/Class;", "I", "I") + ) + ) + ) + ); + } + + public void testInstrumentationTargetLookupWithCtors() throws IOException, ClassNotFoundException { + Map checkMethods = instrumentationService.lookupMethodsToInstrument(TestCheckerCtors.class.getName()); + + assertThat(checkMethods, aMapWithSize(2)); + assertThat( + checkMethods, + hasEntry( + equalTo(new MethodKey("org/example/TestTargetClass", "<init>", List.of("I", "java/lang/String"))), + equalTo( + new CheckMethod( + "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestCheckerCtors", + "check$org_example_TestTargetClass$", + List.of("Ljava/lang/Class;", "I", "Ljava/lang/String;") + ) + ) + ) + ); + assertThat( + checkMethods, + hasEntry( + equalTo(new MethodKey("org/example/TestTargetClass", "<init>", List.of())), + equalTo( + new CheckMethod( + "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestCheckerCtors", + "check$org_example_TestTargetClass$", + List.of("Ljava/lang/Class;") + ) + ) + ) + ); + } + + public void testParseCheckerMethodSignatureStaticMethod() { + var methodKey = InstrumentationServiceImpl.parseCheckerMethodSignature( + "check$org_example_TestClass$staticMethod", + new Type[] { Type.getType(Class.class) } + ); + + assertThat(methodKey, equalTo(new MethodKey("org/example/TestClass", "staticMethod", List.of()))); + } + + public void testParseCheckerMethodSignatureStaticMethodWithArgs() { + var methodKey = InstrumentationServiceImpl.parseCheckerMethodSignature( + "check$org_example_TestClass$staticMethod", + new Type[] { Type.getType(Class.class), Type.getType("I"), Type.getType(String.class) } + ); + + assertThat(methodKey, equalTo(new MethodKey("org/example/TestClass", "staticMethod", List.of("I", "java/lang/String")))); + } + + public void testParseCheckerMethodSignatureStaticMethodInnerClass() { + var methodKey = InstrumentationServiceImpl.parseCheckerMethodSignature( + "check$org_example_TestClass$InnerClass$staticMethod", + new Type[] { Type.getType(Class.class) } + ); + + assertThat(methodKey, equalTo(new MethodKey("org/example/TestClass$InnerClass", "staticMethod", List.of()))); + } + + public void testParseCheckerMethodSignatureCtor() { + var methodKey = InstrumentationServiceImpl.parseCheckerMethodSignature( + "check$org_example_TestClass$", + new Type[] { Type.getType(Class.class) } + ); + + assertThat(methodKey, equalTo(new MethodKey("org/example/TestClass", "<init>", List.of()))); + } + + public void testParseCheckerMethodSignatureCtorWithArgs() { + var methodKey = InstrumentationServiceImpl.parseCheckerMethodSignature( +
"check$org_example_TestClass$", + new Type[] { Type.getType(Class.class), Type.getType("I"), Type.getType(String.class) } + ); + + assertThat(methodKey, equalTo(new MethodKey("org/example/TestClass", "", List.of("I", "java/lang/String")))); + } + + public void testParseCheckerMethodSignatureIncorrectName() { + var exception = assertThrows( + IllegalArgumentException.class, + () -> InstrumentationServiceImpl.parseCheckerMethodSignature("check$staticMethod", new Type[] { Type.getType(Class.class) }) + ); + + assertThat(exception.getMessage(), containsString("has incorrect name format")); + } + + public void testParseCheckerMethodSignatureStaticMethodIncorrectArgumentCount() { + var exception = assertThrows( + IllegalArgumentException.class, + () -> InstrumentationServiceImpl.parseCheckerMethodSignature("check$ClassName$staticMethod", new Type[] {}) + ); + assertThat(exception.getMessage(), containsString("It must have a first argument of Class type")); + } + + public void testParseCheckerMethodSignatureStaticMethodIncorrectArgumentType() { + var exception = assertThrows( + IllegalArgumentException.class, + () -> InstrumentationServiceImpl.parseCheckerMethodSignature( + "check$ClassName$staticMethod", + new Type[] { Type.getType(String.class) } + ) + ); + assertThat(exception.getMessage(), containsString("It must have a first argument of Class type")); + } + + public void testParseCheckerMethodSignatureInstanceMethod() { + var methodKey = InstrumentationServiceImpl.parseCheckerMethodSignature( + "check$$instanceMethod", + new Type[] { Type.getType(Class.class), Type.getType(TestTargetClass.class) } + ); + + assertThat( + methodKey, + equalTo( + new MethodKey( + "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestTargetClass", + "instanceMethod", + List.of() + ) + ) + ); + } + + public void testParseCheckerMethodSignatureInstanceMethodWithArgs() { + var methodKey = InstrumentationServiceImpl.parseCheckerMethodSignature( + "check$$instanceMethod", + new Type[] { Type.getType(Class.class), Type.getType(TestTargetClass.class), Type.getType("I"), Type.getType(String.class) } + ); + + assertThat( + methodKey, + equalTo( + new MethodKey( + "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestTargetClass", + "instanceMethod", + List.of("I", "java/lang/String") + ) + ) + ); + } + + public void testParseCheckerMethodSignatureInstanceMethodIncorrectArgumentTypes() { + var exception = assertThrows( + IllegalArgumentException.class, + () -> InstrumentationServiceImpl.parseCheckerMethodSignature("check$$instanceMethod", new Type[] { Type.getType(String.class) }) + ); + assertThat(exception.getMessage(), containsString("It must have a first argument of Class type")); + } + + public void testParseCheckerMethodSignatureInstanceMethodIncorrectArgumentCount() { + var exception = assertThrows( + IllegalArgumentException.class, + () -> InstrumentationServiceImpl.parseCheckerMethodSignature("check$$instanceMethod", new Type[] { Type.getType(Class.class) }) + ); + assertThat(exception.getMessage(), containsString("a second argument of the class containing the method to instrument")); + } + + public void testParseCheckerMethodSignatureInstanceMethodIncorrectArgumentTypes2() { + var exception = assertThrows( + IllegalArgumentException.class, + () -> InstrumentationServiceImpl.parseCheckerMethodSignature( + "check$$instanceMethod", + new Type[] { Type.getType(Class.class), Type.getType("I") } + ) + ); + assertThat(exception.getMessage(), 
containsString("a second argument of the class containing the method to instrument")); + } +} diff --git a/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java b/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java index 9a57e199d4907..c8e1b26d1fc52 100644 --- a/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java +++ b/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java @@ -9,9 +9,9 @@ package org.elasticsearch.entitlement.instrumentation.impl; -import org.elasticsearch.common.Strings; import org.elasticsearch.entitlement.bridge.EntitlementChecker; -import org.elasticsearch.entitlement.instrumentation.InstrumentationService; +import org.elasticsearch.entitlement.instrumentation.CheckMethod; +import org.elasticsearch.entitlement.instrumentation.MethodKey; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.test.ESTestCase; @@ -21,12 +21,21 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; -import java.util.Arrays; -import java.util.stream.Collectors; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URL; +import java.net.URLStreamHandlerFactory; +import java.util.List; +import java.util.Map; import static org.elasticsearch.entitlement.instrumentation.impl.ASMUtils.bytecode2text; -import static org.elasticsearch.entitlement.instrumentation.impl.InstrumenterImpl.getClassFileInfo; -import static org.hamcrest.Matchers.is; +import static org.elasticsearch.entitlement.instrumentation.impl.TestMethodUtils.callStaticMethod; +import static org.elasticsearch.entitlement.instrumentation.impl.TestMethodUtils.getCheckMethod; +import static org.elasticsearch.entitlement.instrumentation.impl.TestMethodUtils.methodKeyForConstructor; +import static org.elasticsearch.entitlement.instrumentation.impl.TestMethodUtils.methodKeyForTarget; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; import static org.objectweb.asm.Opcodes.INVOKESTATIC; /** @@ -36,7 +45,6 @@ */ @ESTestCase.WithoutSecurityManager public class InstrumenterTests extends ESTestCase { - final InstrumentationService instrumentationService = new InstrumentationServiceImpl(); static volatile TestEntitlementChecker testChecker; @@ -64,17 +72,29 @@ public interface Testable {} * They must not throw {@link TestException}. */ public static class ClassToInstrument implements Testable { + + public ClassToInstrument() {} + + // URLClassLoader ctor + public ClassToInstrument(URL[] urls) {} + public static void systemExit(int status) { assertEquals(123, status); } + } - public static void anotherSystemExit(int status) { - assertEquals(123, status); + private static final String SAMPLE_NAME = "TEST"; + + private static final URL SAMPLE_URL = createSampleUrl(); + + private static URL createSampleUrl() { + try { + return URI.create("file:/test/example").toURL(); + } catch (MalformedURLException e) { + return null; } } - static final class TestException extends RuntimeException {} - /** * We're not testing the permission checking logic here; * only that the instrumented methods are calling the correct check methods with the correct arguments. 
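* (For instance, once {@code ClassToInstrument.systemExit} is instrumented, calling it first invokes {@code check$java_lang_System$exit(callerClass, 123)}; the test checker below merely counts the call and throws {@code TestException} while {@code isActive} is set.)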
@@ -90,15 +110,68 @@ public static class TestEntitlementChecker implements EntitlementChecker { volatile boolean isActive; int checkSystemExitCallCount = 0; + int checkURLClassLoaderCallCount = 0; @Override - public void checkSystemExit(Class callerClass, int status) { + public void check$java_lang_System$exit(Class callerClass, int status) { checkSystemExitCallCount++; - assertSame(InstrumenterTests.class, callerClass); + assertSame(TestMethodUtils.class, callerClass); assertEquals(123, status); throwIfActive(); } + @Override + public void check$java_net_URLClassLoader$(Class callerClass, URL[] urls) { + checkURLClassLoaderCallCount++; + assertSame(InstrumenterTests.class, callerClass); + assertThat(urls, arrayContaining(SAMPLE_URL)); + throwIfActive(); + } + + @Override + public void check$java_net_URLClassLoader$(Class callerClass, URL[] urls, ClassLoader parent) { + checkURLClassLoaderCallCount++; + assertSame(InstrumenterTests.class, callerClass); + assertThat(urls, arrayContaining(SAMPLE_URL)); + assertThat(parent, equalTo(ClassLoader.getSystemClassLoader())); + throwIfActive(); + } + + @Override + public void check$java_net_URLClassLoader$(Class callerClass, URL[] urls, ClassLoader parent, URLStreamHandlerFactory factory) { + checkURLClassLoaderCallCount++; + assertSame(InstrumenterTests.class, callerClass); + assertThat(urls, arrayContaining(SAMPLE_URL)); + assertThat(parent, equalTo(ClassLoader.getSystemClassLoader())); + throwIfActive(); + } + + @Override + public void check$java_net_URLClassLoader$(Class callerClass, String name, URL[] urls, ClassLoader parent) { + checkURLClassLoaderCallCount++; + assertSame(InstrumenterTests.class, callerClass); + assertThat(name, equalTo(SAMPLE_NAME)); + assertThat(urls, arrayContaining(SAMPLE_URL)); + assertThat(parent, equalTo(ClassLoader.getSystemClassLoader())); + throwIfActive(); + } + + @Override + public void check$java_net_URLClassLoader$( + Class callerClass, + String name, + URL[] urls, + ClassLoader parent, + URLStreamHandlerFactory factory + ) { + checkURLClassLoaderCallCount++; + assertSame(InstrumenterTests.class, callerClass); + assertThat(name, equalTo(SAMPLE_NAME)); + assertThat(urls, arrayContaining(SAMPLE_URL)); + assertThat(parent, equalTo(ClassLoader.getSystemClassLoader())); + throwIfActive(); + } + private void throwIfActive() { if (isActive) { throw new TestException(); @@ -106,9 +179,15 @@ private void throwIfActive() { } } - public void testClassIsInstrumented() throws Exception { + public void testSystemExitIsInstrumented() throws Exception { var classToInstrument = ClassToInstrument.class; - var instrumenter = createInstrumenter(classToInstrument, "systemExit"); + + Map checkMethods = Map.of( + methodKeyForTarget(classToInstrument.getMethod("systemExit", int.class)), + getCheckMethod(EntitlementChecker.class, "check$java_lang_System$exit", Class.class, int.class) + ); + + var instrumenter = createInstrumenter(checkMethods); byte[] newBytecode = instrumenter.instrumentClassFile(classToInstrument).bytecodes(); @@ -117,7 +196,7 @@ public void testClassIsInstrumented() throws Exception { } Class newClass = new TestLoader(Testable.class.getClassLoader()).defineClassFromBytes( - ClassToInstrument.class.getName() + "_NEW", + classToInstrument.getName() + "_NEW", newBytecode ); @@ -132,77 +211,52 @@ public void testClassIsInstrumented() throws Exception { assertThrows(TestException.class, () -> callStaticMethod(newClass, "systemExit", 123)); } - public void testClassIsNotInstrumentedTwice() throws Exception { + public void 
testURLClassLoaderIsInstrumented() throws Exception { var classToInstrument = ClassToInstrument.class; - var instrumenter = createInstrumenter(classToInstrument, "systemExit"); - - InstrumenterImpl.ClassFileInfo initial = getClassFileInfo(classToInstrument); - var internalClassName = Type.getInternalName(classToInstrument); - byte[] instrumentedBytecode = instrumenter.instrumentClass(internalClassName, initial.bytecodes()); - byte[] instrumentedTwiceBytecode = instrumenter.instrumentClass(internalClassName, instrumentedBytecode); - - logger.trace(() -> Strings.format("Bytecode after 1st instrumentation:\n%s", bytecode2text(instrumentedBytecode))); - logger.trace(() -> Strings.format("Bytecode after 2nd instrumentation:\n%s", bytecode2text(instrumentedTwiceBytecode))); - - Class newClass = new TestLoader(Testable.class.getClassLoader()).defineClassFromBytes( - ClassToInstrument.class.getName() + "_NEW_NEW", - instrumentedTwiceBytecode + Map checkMethods = Map.of( + methodKeyForConstructor(classToInstrument, List.of(Type.getInternalName(URL[].class))), + getCheckMethod(EntitlementChecker.class, "check$java_net_URLClassLoader$", Class.class, URL[].class) ); - getTestEntitlementChecker().isActive = true; - getTestEntitlementChecker().checkSystemExitCallCount = 0; - - assertThrows(TestException.class, () -> callStaticMethod(newClass, "systemExit", 123)); - assertThat(getTestEntitlementChecker().checkSystemExitCallCount, is(1)); - } - - public void testClassAllMethodsAreInstrumentedFirstPass() throws Exception { - var classToInstrument = ClassToInstrument.class; - var instrumenter = createInstrumenter(classToInstrument, "systemExit", "anotherSystemExit"); - - InstrumenterImpl.ClassFileInfo initial = getClassFileInfo(classToInstrument); - var internalClassName = Type.getInternalName(classToInstrument); + var instrumenter = createInstrumenter(checkMethods); - byte[] instrumentedBytecode = instrumenter.instrumentClass(internalClassName, initial.bytecodes()); - byte[] instrumentedTwiceBytecode = instrumenter.instrumentClass(internalClassName, instrumentedBytecode); + byte[] newBytecode = instrumenter.instrumentClassFile(classToInstrument).bytecodes(); - logger.trace(() -> Strings.format("Bytecode after 1st instrumentation:\n%s", bytecode2text(instrumentedBytecode))); - logger.trace(() -> Strings.format("Bytecode after 2nd instrumentation:\n%s", bytecode2text(instrumentedTwiceBytecode))); + if (logger.isTraceEnabled()) { + logger.trace("Bytecode after instrumentation:\n{}", bytecode2text(newBytecode)); + } Class newClass = new TestLoader(Testable.class.getClassLoader()).defineClassFromBytes( - ClassToInstrument.class.getName() + "_NEW_NEW", - instrumentedTwiceBytecode + classToInstrument.getName() + "_NEW", + newBytecode ); - getTestEntitlementChecker().isActive = true; - getTestEntitlementChecker().checkSystemExitCallCount = 0; + getTestEntitlementChecker().isActive = false; - assertThrows(TestException.class, () -> callStaticMethod(newClass, "systemExit", 123)); - assertThat(getTestEntitlementChecker().checkSystemExitCallCount, is(1)); + // Before checking is active, nothing should throw + newClass.getConstructor(URL[].class).newInstance((Object) new URL[] { SAMPLE_URL }); - assertThrows(TestException.class, () -> callStaticMethod(newClass, "anotherSystemExit", 123)); - assertThat(getTestEntitlementChecker().checkSystemExitCallCount, is(2)); + getTestEntitlementChecker().isActive = true; + + // After checking is activated, everything should throw + var exception = assertThrows( + 
InvocationTargetException.class, + () -> newClass.getConstructor(URL[].class).newInstance((Object) new URL[] { SAMPLE_URL }) + ); + assertThat(exception.getCause(), instanceOf(TestException.class)); } - /** This test doesn't replace ClassToInstrument in-place but instead loads a separate - * class ClassToInstrument_NEW that contains the instrumentation. Because of this, - * we need to configure the Transformer to use a MethodKey and instrumentationMethod - * with slightly different signatures (using the common interface Testable) which - * is not what would happen when it's run by the agent. + /** This test doesn't replace classToInstrument in-place but instead loads a separate + * class with the same class name plus a "_NEW" suffix (classToInstrument.class.getName() + "_NEW") + * that contains the instrumentation. Because of this, we need to configure the Transformer to use a + * MethodKey and instrumentationMethod with slightly different signatures (using the common interface + * Testable) which is not what would happen when it's run by the agent. */ - private InstrumenterImpl createInstrumenter(Class classToInstrument, String... methodNames) throws NoSuchMethodException { - Method v1 = EntitlementChecker.class.getMethod("checkSystemExit", Class.class, int.class); - var methods = Arrays.stream(methodNames).map(name -> { - try { - return instrumentationService.methodKeyForTarget(classToInstrument.getMethod(name, int.class)); - } catch (NoSuchMethodException e) { - throw new RuntimeException(e); - } - }).collect(Collectors.toUnmodifiableMap(name -> name, name -> v1)); - + private InstrumenterImpl createInstrumenter(Map checkMethods) throws NoSuchMethodException { Method getter = InstrumenterTests.class.getMethod("getTestEntitlementChecker"); - return new InstrumenterImpl("_NEW", methods) { + + return new InstrumenterImpl(null, null, "_NEW", checkMethods) { /** * We're not testing the bridge library here. * Just call our own getter instead. @@ -220,33 +274,5 @@ protected void pushEntitlementChecker(MethodVisitor mv) { }; } - /** - * Calling a static method of a dynamically loaded class is significantly more cumbersome - * than calling a virtual method. - */ - private static void callStaticMethod(Class c, String methodName, int status) throws NoSuchMethodException, IllegalAccessException { - try { - c.getMethod(methodName, int.class).invoke(null, status); - } catch (InvocationTargetException e) { - Throwable cause = e.getCause(); - if (cause instanceof TestException n) { - // Sometimes we're expecting this one! - throw n; - } else { - throw new AssertionError(cause); - } - } - } - - static class TestLoader extends ClassLoader { - TestLoader(ClassLoader parent) { - super(parent); - } - - public Class defineClassFromBytes(String name, byte[] bytes) { - return defineClass(name, bytes, 0, bytes.length); - } - } - private static final Logger logger = LogManager.getLogger(InstrumenterTests.class); } diff --git a/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/SyntheticInstrumenterTests.java b/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/SyntheticInstrumenterTests.java new file mode 100644 index 0000000000000..8e0409971ba61 --- /dev/null +++ b/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/SyntheticInstrumenterTests.java @@ -0,0 +1,383 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.entitlement.instrumentation.impl; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.entitlement.instrumentation.CheckMethod; +import org.elasticsearch.entitlement.instrumentation.MethodKey; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.test.ESTestCase; +import org.objectweb.asm.Type; + +import java.lang.reflect.InvocationTargetException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.entitlement.instrumentation.impl.ASMUtils.bytecode2text; +import static org.elasticsearch.entitlement.instrumentation.impl.InstrumenterImpl.getClassFileInfo; +import static org.elasticsearch.entitlement.instrumentation.impl.TestMethodUtils.callStaticMethod; +import static org.elasticsearch.entitlement.instrumentation.impl.TestMethodUtils.getCheckMethod; +import static org.elasticsearch.entitlement.instrumentation.impl.TestMethodUtils.methodKeyForTarget; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.startsWith; + +/** + * This tests {@link InstrumenterImpl} with some ad-hoc instrumented method and checker methods, to allow us to check + * some ad-hoc test cases (e.g. overloaded methods, overloaded targets, multiple instrumentation, etc.) + */ +@ESTestCase.WithoutSecurityManager +public class SyntheticInstrumenterTests extends ESTestCase { + private static final Logger logger = LogManager.getLogger(SyntheticInstrumenterTests.class); + + /** + * Contains all the virtual methods from {@link TestClassToInstrument}, + * allowing this test to call them on the dynamically loaded instrumented class. + */ + public interface Testable { + // This method is here to demonstrate Instrumenter does not get confused by overloads + void someMethod(int arg); + + void someMethod(int arg, String anotherArg); + } + + /** + * This is a placeholder for real class library methods. + * Without the java agent, we can't instrument the real methods, so we instrument this instead. + *
<p>
+ * Methods of this class must have the same signature and the same static/virtual condition as the corresponding real method. + * They should assert that the arguments came through correctly. + * They must not throw {@link TestException}. + */ + public static class TestClassToInstrument implements Testable { + + public TestClassToInstrument() {} + + public TestClassToInstrument(int arg) {} + + public void someMethod(int arg) {} + + public void someMethod(int arg, String anotherArg) {} + + public static void someStaticMethod(int arg) {} + + public static void someStaticMethod(int arg, String anotherArg) {} + + public static void anotherStaticMethod(int arg) {} + } + + /** + * Interface to test specific, "synthetic" cases (e.g. overloaded methods, overloaded constructors, etc.) that + * may be not present/may be difficult to find or not clear in the production EntitlementChecker interface + */ + public interface MockEntitlementChecker { + void checkSomeStaticMethod(Class clazz, int arg); + + void checkSomeStaticMethod(Class clazz, int arg, String anotherArg); + + void checkSomeInstanceMethod(Class clazz, Testable that, int arg, String anotherArg); + + void checkCtor(Class clazz); + + void checkCtor(Class clazz, int arg); + } + + public static class TestEntitlementCheckerHolder { + static TestEntitlementChecker checkerInstance = new TestEntitlementChecker(); + + public static MockEntitlementChecker instance() { + return checkerInstance; + } + } + + public static class TestEntitlementChecker implements MockEntitlementChecker { + /** + * This allows us to test that the instrumentation is correct in both cases: + * if the check throws, and if it doesn't. + */ + volatile boolean isActive; + + int checkSomeStaticMethodIntCallCount = 0; + int checkSomeStaticMethodIntStringCallCount = 0; + int checkSomeInstanceMethodCallCount = 0; + + int checkCtorCallCount = 0; + int checkCtorIntCallCount = 0; + + private void throwIfActive() { + if (isActive) { + throw new TestException(); + } + } + + @Override + public void checkSomeStaticMethod(Class callerClass, int arg) { + checkSomeStaticMethodIntCallCount++; + assertSame(TestMethodUtils.class, callerClass); + assertEquals(123, arg); + throwIfActive(); + } + + @Override + public void checkSomeStaticMethod(Class callerClass, int arg, String anotherArg) { + checkSomeStaticMethodIntStringCallCount++; + assertSame(TestMethodUtils.class, callerClass); + assertEquals(123, arg); + assertEquals("abc", anotherArg); + throwIfActive(); + } + + @Override + public void checkSomeInstanceMethod(Class callerClass, Testable that, int arg, String anotherArg) { + checkSomeInstanceMethodCallCount++; + assertSame(SyntheticInstrumenterTests.class, callerClass); + assertThat( + that.getClass().getName(), + startsWith("org.elasticsearch.entitlement.instrumentation.impl.SyntheticInstrumenterTests$TestClassToInstrument") + ); + assertEquals(123, arg); + assertEquals("def", anotherArg); + throwIfActive(); + } + + @Override + public void checkCtor(Class callerClass) { + checkCtorCallCount++; + assertSame(SyntheticInstrumenterTests.class, callerClass); + throwIfActive(); + } + + @Override + public void checkCtor(Class callerClass, int arg) { + checkCtorIntCallCount++; + assertSame(SyntheticInstrumenterTests.class, callerClass); + assertEquals(123, arg); + throwIfActive(); + } + } + + public void testClassIsInstrumented() throws Exception { + var classToInstrument = TestClassToInstrument.class; + + CheckMethod checkMethod = getCheckMethod(MockEntitlementChecker.class, 
"checkSomeStaticMethod", Class.class, int.class); + Map checkMethods = Map.of( + methodKeyForTarget(classToInstrument.getMethod("someStaticMethod", int.class)), + checkMethod + ); + + var instrumenter = createInstrumenter(checkMethods); + + byte[] newBytecode = instrumenter.instrumentClassFile(classToInstrument).bytecodes(); + + if (logger.isTraceEnabled()) { + logger.trace("Bytecode after instrumentation:\n{}", bytecode2text(newBytecode)); + } + + Class newClass = new TestLoader(Testable.class.getClassLoader()).defineClassFromBytes( + classToInstrument.getName() + "_NEW", + newBytecode + ); + + TestEntitlementCheckerHolder.checkerInstance.isActive = false; + + // Before checking is active, nothing should throw + callStaticMethod(newClass, "someStaticMethod", 123); + + TestEntitlementCheckerHolder.checkerInstance.isActive = true; + + // After checking is activated, everything should throw + assertThrows(TestException.class, () -> callStaticMethod(newClass, "someStaticMethod", 123)); + } + + public void testClassIsNotInstrumentedTwice() throws Exception { + var classToInstrument = TestClassToInstrument.class; + + CheckMethod checkMethod = getCheckMethod(MockEntitlementChecker.class, "checkSomeStaticMethod", Class.class, int.class); + Map checkMethods = Map.of( + methodKeyForTarget(classToInstrument.getMethod("someStaticMethod", int.class)), + checkMethod + ); + + var instrumenter = createInstrumenter(checkMethods); + + InstrumenterImpl.ClassFileInfo initial = getClassFileInfo(classToInstrument); + var internalClassName = Type.getInternalName(classToInstrument); + + byte[] instrumentedBytecode = instrumenter.instrumentClass(internalClassName, initial.bytecodes()); + byte[] instrumentedTwiceBytecode = instrumenter.instrumentClass(internalClassName, instrumentedBytecode); + + logger.trace(() -> Strings.format("Bytecode after 1st instrumentation:\n%s", bytecode2text(instrumentedBytecode))); + logger.trace(() -> Strings.format("Bytecode after 2nd instrumentation:\n%s", bytecode2text(instrumentedTwiceBytecode))); + + Class newClass = new TestLoader(Testable.class.getClassLoader()).defineClassFromBytes( + classToInstrument.getName() + "_NEW_NEW", + instrumentedTwiceBytecode + ); + + TestEntitlementCheckerHolder.checkerInstance.isActive = true; + TestEntitlementCheckerHolder.checkerInstance.checkSomeStaticMethodIntCallCount = 0; + + assertThrows(TestException.class, () -> callStaticMethod(newClass, "someStaticMethod", 123)); + assertEquals(1, TestEntitlementCheckerHolder.checkerInstance.checkSomeStaticMethodIntCallCount); + } + + public void testClassAllMethodsAreInstrumentedFirstPass() throws Exception { + var classToInstrument = TestClassToInstrument.class; + + CheckMethod checkMethod = getCheckMethod(MockEntitlementChecker.class, "checkSomeStaticMethod", Class.class, int.class); + Map checkMethods = Map.of( + methodKeyForTarget(classToInstrument.getMethod("someStaticMethod", int.class)), + checkMethod, + methodKeyForTarget(classToInstrument.getMethod("anotherStaticMethod", int.class)), + checkMethod + ); + + var instrumenter = createInstrumenter(checkMethods); + + InstrumenterImpl.ClassFileInfo initial = getClassFileInfo(classToInstrument); + var internalClassName = Type.getInternalName(classToInstrument); + + byte[] instrumentedBytecode = instrumenter.instrumentClass(internalClassName, initial.bytecodes()); + byte[] instrumentedTwiceBytecode = instrumenter.instrumentClass(internalClassName, instrumentedBytecode); + + logger.trace(() -> Strings.format("Bytecode after 1st instrumentation:\n%s", 
bytecode2text(instrumentedBytecode))); + logger.trace(() -> Strings.format("Bytecode after 2nd instrumentation:\n%s", bytecode2text(instrumentedTwiceBytecode))); + + Class newClass = new TestLoader(Testable.class.getClassLoader()).defineClassFromBytes( + classToInstrument.getName() + "_NEW_NEW", + instrumentedTwiceBytecode + ); + + TestEntitlementCheckerHolder.checkerInstance.isActive = true; + TestEntitlementCheckerHolder.checkerInstance.checkSomeStaticMethodIntCallCount = 0; + + assertThrows(TestException.class, () -> callStaticMethod(newClass, "someStaticMethod", 123)); + assertEquals(1, TestEntitlementCheckerHolder.checkerInstance.checkSomeStaticMethodIntCallCount); + + assertThrows(TestException.class, () -> callStaticMethod(newClass, "anotherStaticMethod", 123)); + assertEquals(2, TestEntitlementCheckerHolder.checkerInstance.checkSomeStaticMethodIntCallCount); + } + + public void testInstrumenterWorksWithOverloads() throws Exception { + var classToInstrument = TestClassToInstrument.class; + + Map checkMethods = Map.of( + methodKeyForTarget(classToInstrument.getMethod("someStaticMethod", int.class)), + getCheckMethod(MockEntitlementChecker.class, "checkSomeStaticMethod", Class.class, int.class), + methodKeyForTarget(classToInstrument.getMethod("someStaticMethod", int.class, String.class)), + getCheckMethod(MockEntitlementChecker.class, "checkSomeStaticMethod", Class.class, int.class, String.class) + ); + + var instrumenter = createInstrumenter(checkMethods); + + byte[] newBytecode = instrumenter.instrumentClassFile(classToInstrument).bytecodes(); + + if (logger.isTraceEnabled()) { + logger.trace("Bytecode after instrumentation:\n{}", bytecode2text(newBytecode)); + } + + Class newClass = new TestLoader(Testable.class.getClassLoader()).defineClassFromBytes( + classToInstrument.getName() + "_NEW", + newBytecode + ); + + TestEntitlementCheckerHolder.checkerInstance.isActive = true; + TestEntitlementCheckerHolder.checkerInstance.checkSomeStaticMethodIntCallCount = 0; + TestEntitlementCheckerHolder.checkerInstance.checkSomeStaticMethodIntStringCallCount = 0; + + // After checking is activated, everything should throw + assertThrows(TestException.class, () -> callStaticMethod(newClass, "someStaticMethod", 123)); + assertThrows(TestException.class, () -> callStaticMethod(newClass, "someStaticMethod", 123, "abc")); + + assertEquals(1, TestEntitlementCheckerHolder.checkerInstance.checkSomeStaticMethodIntCallCount); + assertEquals(1, TestEntitlementCheckerHolder.checkerInstance.checkSomeStaticMethodIntStringCallCount); + } + + public void testInstrumenterWorksWithInstanceMethodsAndOverloads() throws Exception { + var classToInstrument = TestClassToInstrument.class; + + Map checkMethods = Map.of( + methodKeyForTarget(classToInstrument.getMethod("someMethod", int.class, String.class)), + getCheckMethod(MockEntitlementChecker.class, "checkSomeInstanceMethod", Class.class, Testable.class, int.class, String.class) + ); + + var instrumenter = createInstrumenter(checkMethods); + + byte[] newBytecode = instrumenter.instrumentClassFile(classToInstrument).bytecodes(); + + if (logger.isTraceEnabled()) { + logger.trace("Bytecode after instrumentation:\n{}", bytecode2text(newBytecode)); + } + + Class newClass = new TestLoader(Testable.class.getClassLoader()).defineClassFromBytes( + classToInstrument.getName() + "_NEW", + newBytecode + ); + + TestEntitlementCheckerHolder.checkerInstance.isActive = true; + TestEntitlementCheckerHolder.checkerInstance.checkSomeInstanceMethodCallCount = 0; + + Testable 
testTargetClass = (Testable) (newClass.getConstructor().newInstance()); + + // This overload is not instrumented, so it will not throw + testTargetClass.someMethod(123); + assertThrows(TestException.class, () -> testTargetClass.someMethod(123, "def")); + + assertEquals(1, TestEntitlementCheckerHolder.checkerInstance.checkSomeInstanceMethodCallCount); + } + + public void testInstrumenterWorksWithConstructors() throws Exception { + var classToInstrument = TestClassToInstrument.class; + + Map checkMethods = Map.of( + new MethodKey(classToInstrument.getName().replace('.', '/'), "<init>", List.of()), + getCheckMethod(MockEntitlementChecker.class, "checkCtor", Class.class), + new MethodKey(classToInstrument.getName().replace('.', '/'), "<init>", List.of("I")), + getCheckMethod(MockEntitlementChecker.class, "checkCtor", Class.class, int.class) + ); + + var instrumenter = createInstrumenter(checkMethods); + + byte[] newBytecode = instrumenter.instrumentClassFile(classToInstrument).bytecodes(); + + if (logger.isTraceEnabled()) { + logger.trace("Bytecode after instrumentation:\n{}", bytecode2text(newBytecode)); + } + + Class newClass = new TestLoader(Testable.class.getClassLoader()).defineClassFromBytes( + classToInstrument.getName() + "_NEW", + newBytecode + ); + + TestEntitlementCheckerHolder.checkerInstance.isActive = true; + + var ex = assertThrows(InvocationTargetException.class, () -> newClass.getConstructor().newInstance()); + assertThat(ex.getCause(), instanceOf(TestException.class)); + var ex2 = assertThrows(InvocationTargetException.class, () -> newClass.getConstructor(int.class).newInstance(123)); + assertThat(ex2.getCause(), instanceOf(TestException.class)); + + assertEquals(1, TestEntitlementCheckerHolder.checkerInstance.checkCtorCallCount); + assertEquals(1, TestEntitlementCheckerHolder.checkerInstance.checkCtorIntCallCount); + } + + /** This test doesn't replace classToInstrument in-place but instead loads a separate + * class with the same class name plus a "_NEW" suffix (classToInstrument.class.getName() + "_NEW") + * that contains the instrumentation. Because of this, we need to configure the Transformer to use a + * MethodKey and instrumentationMethod with slightly different signatures (using the common interface + * Testable) which is not what would happen when it's run by the agent. + */ + private InstrumenterImpl createInstrumenter(Map checkMethods) { + String checkerClass = Type.getInternalName(SyntheticInstrumenterTests.MockEntitlementChecker.class); + String handleClass = Type.getInternalName(SyntheticInstrumenterTests.TestEntitlementCheckerHolder.class); + String getCheckerClassMethodDescriptor = Type.getMethodDescriptor(Type.getObjectType(checkerClass)); + + return new InstrumenterImpl(handleClass, getCheckerClassMethodDescriptor, "_NEW", checkMethods); + } +} diff --git a/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/TestException.java b/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/TestException.java new file mode 100644 index 0000000000000..5e308e5bd4a98 --- /dev/null +++ b/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/TestException.java @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.entitlement.instrumentation.impl; + +final class TestException extends RuntimeException {} diff --git a/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/TestLoader.java b/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/TestLoader.java new file mode 100644 index 0000000000000..9eb8e9328ecba --- /dev/null +++ b/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/TestLoader.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.entitlement.instrumentation.impl; + +class TestLoader extends ClassLoader { + TestLoader(ClassLoader parent) { + super(parent); + } + + public Class defineClassFromBytes(String name, byte[] bytes) { + return defineClass(name, bytes, 0, bytes.length); + } +} diff --git a/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/TestMethodUtils.java b/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/TestMethodUtils.java new file mode 100644 index 0000000000000..de7822fea926e --- /dev/null +++ b/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/TestMethodUtils.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.entitlement.instrumentation.impl; + +import org.elasticsearch.entitlement.instrumentation.CheckMethod; +import org.elasticsearch.entitlement.instrumentation.MethodKey; +import org.objectweb.asm.Type; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Stream; + +class TestMethodUtils { + + /** + * @return a {@link MethodKey} suitable for looking up the given {@code targetMethod} in the entitlements trampoline + */ + static MethodKey methodKeyForTarget(Method targetMethod) { + Type actualType = Type.getMethodType(Type.getMethodDescriptor(targetMethod)); + return new MethodKey( + Type.getInternalName(targetMethod.getDeclaringClass()), + targetMethod.getName(), + Stream.of(actualType.getArgumentTypes()).map(Type::getInternalName).toList() + ); + } + + static MethodKey methodKeyForConstructor(Class classToInstrument, List params) { + return new MethodKey(classToInstrument.getName().replace('.', '/'), "<init>", params); + } + + static CheckMethod getCheckMethod(Class clazz, String methodName, Class... parameterTypes) throws NoSuchMethodException { + var method = clazz.getMethod(methodName, parameterTypes); + return new CheckMethod( + Type.getInternalName(clazz), + method.getName(), + Arrays.stream(Type.getArgumentTypes(method)).map(Type::getDescriptor).toList() + ); + } + + /** + * Calling a static method of a dynamically loaded class is significantly more cumbersome + * than calling a virtual method. + */ + static void callStaticMethod(Class c, String methodName, int arg) throws NoSuchMethodException, IllegalAccessException { + try { + c.getMethod(methodName, int.class).invoke(null, arg); + } catch (InvocationTargetException e) { + Throwable cause = e.getCause(); + if (cause instanceof TestException n) { + // Sometimes we're expecting this one! + throw n; + } else { + throw new AssertionError(cause); + } + } + } + + static void callStaticMethod(Class c, String methodName, int arg1, String arg2) throws NoSuchMethodException, + IllegalAccessException { + try { + c.getMethod(methodName, int.class, String.class).invoke(null, arg1, arg2); + } catch (InvocationTargetException e) { + Throwable cause = e.getCause(); + if (cause instanceof TestException n) { + // Sometimes we're expecting this one! + throw n; + } else { + throw new AssertionError(cause); + } + } + } +} diff --git a/libs/entitlement/bridge/build.gradle b/libs/entitlement/bridge/build.gradle index 3d59dd3eaf33e..a9f8f6e3a3b0a 100644 --- a/libs/entitlement/bridge/build.gradle +++ b/libs/entitlement/bridge/build.gradle @@ -7,19 +7,18 @@ * License v3.0 only", or the "Server Side Public License, v 1".
 */

+import org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask
+
 apply plugin: 'elasticsearch.build'
+apply plugin: 'elasticsearch.mrjar'

-configurations {
-  bridgeJar {
-    canBeConsumed = true
-    canBeResolved = false
+tasks.named('jar').configure {
+  // guarding for intellij
+  if (sourceSets.findByName("main23")) {
+    from sourceSets.main23.output
   }
 }

-artifacts {
-  bridgeJar(jar)
-}
-
-tasks.named('forbiddenApisMain').configure {
+tasks.withType(CheckForbiddenApisTask).configureEach {
   replaceSignatureFiles 'jdk-signatures'
 }
diff --git a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java
index 5ebb7d00e26f5..ad0f14bcf4478 100644
--- a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java
+++ b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java
@@ -9,6 +9,20 @@

 package org.elasticsearch.entitlement.bridge;

+import java.net.URL;
+import java.net.URLStreamHandlerFactory;
+
 public interface EntitlementChecker {
-    void checkSystemExit(Class<?> callerClass, int status);
+    void check$java_lang_System$exit(Class<?> callerClass, int status);
+
+    // URLClassLoader ctor
+    void check$java_net_URLClassLoader$(Class<?> callerClass, URL[] urls);
+
+    void check$java_net_URLClassLoader$(Class<?> callerClass, URL[] urls, ClassLoader parent);
+
+    void check$java_net_URLClassLoader$(Class<?> callerClass, URL[] urls, ClassLoader parent, URLStreamHandlerFactory factory);
+
+    void check$java_net_URLClassLoader$(Class<?> callerClass, String name, URL[] urls, ClassLoader parent);
+
+    void check$java_net_URLClassLoader$(Class<?> callerClass, String name, URL[] urls, ClassLoader parent, URLStreamHandlerFactory factory);
 }
diff --git a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementCheckerHandle.java b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementCheckerHandle.java
index 2fe4a163a4136..26c9c83b8eb51 100644
--- a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementCheckerHandle.java
+++ b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementCheckerHandle.java
@@ -9,9 +9,6 @@

 package org.elasticsearch.entitlement.bridge;

-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-
 /**
  * Makes the {@link EntitlementChecker} available to injected bytecode.
  */
@@ -35,27 +32,7 @@ private static class Holder {
     * The {@code EntitlementInitialization} class is what actually instantiates it and makes it available;
     * here, we copy it into a static final variable for maximum performance.
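[Editorial aside] A note on the check$ naming scheme introduced above: each bridge method encodes its target as check$ + the fully qualified class name with dots replaced by underscores + $ + the method name, with an empty method name for constructors (hence the trailing $ on the URLClassLoader overloads). In Java terms, the prologue injected before System.exit is roughly equivalent to the following sketch; the method and parameter names here are illustrative, not the literal injected bytecode:

    // Roughly what the instrumentation makes System.exit run first (a sketch):
    static void exitPrologue(Class<?> callerClass, int status) {
        EntitlementCheckerHandle.instance().check$java_lang_System$exit(callerClass, status);
        // the original method body proceeds only if the check did not throw
    }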
     */
-    private static final EntitlementChecker instance;
-    static {
-        String initClazz = "org.elasticsearch.entitlement.initialization.EntitlementInitialization";
-        final Class<?> clazz;
-        try {
-            clazz = ClassLoader.getSystemClassLoader().loadClass(initClazz);
-        } catch (ClassNotFoundException e) {
-            throw new AssertionError("java.base cannot find entitlement initialziation", e);
-        }
-        final Method checkerMethod;
-        try {
-            checkerMethod = clazz.getMethod("checker");
-        } catch (NoSuchMethodException e) {
-            throw new AssertionError("EntitlementInitialization is missing checker() method", e);
-        }
-        try {
-            instance = (EntitlementChecker) checkerMethod.invoke(null);
-        } catch (IllegalAccessException | InvocationTargetException e) {
-            throw new AssertionError(e);
-        }
-    }
+    private static final EntitlementChecker instance = HandleLoader.load(EntitlementChecker.class);
 }

 // no construction
diff --git a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/HandleLoader.java b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/HandleLoader.java
new file mode 100644
index 0000000000000..bbfec47884f79
--- /dev/null
+++ b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/HandleLoader.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.bridge;
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+class HandleLoader {
+
+    static <T> T load(Class<T> checkerClass) {
+        String initClassName = "org.elasticsearch.entitlement.initialization.EntitlementInitialization";
+        final Class<?> initClazz;
+        try {
+            initClazz = ClassLoader.getSystemClassLoader().loadClass(initClassName);
+        } catch (ClassNotFoundException e) {
+            throw new AssertionError("java.base cannot find entitlement initialization", e);
+        }
+        final Method checkerMethod;
+        try {
+            checkerMethod = initClazz.getMethod("checker");
+        } catch (NoSuchMethodException e) {
+            throw new AssertionError("EntitlementInitialization is missing checker() method", e);
+        }
+        try {
+            return checkerClass.cast(checkerMethod.invoke(null));
+        } catch (IllegalAccessException | InvocationTargetException e) {
+            throw new AssertionError(e);
+        }
+    }
+
+    // no instance
+    private HandleLoader() {}
+}
diff --git a/libs/entitlement/bridge/src/main23/java/org/elasticsearch/entitlement/bridge/Java23EntitlementChecker.java b/libs/entitlement/bridge/src/main23/java/org/elasticsearch/entitlement/bridge/Java23EntitlementChecker.java
new file mode 100644
index 0000000000000..244632e80ffa0
--- /dev/null
+++ b/libs/entitlement/bridge/src/main23/java/org/elasticsearch/entitlement/bridge/Java23EntitlementChecker.java
@@ -0,0 +1,12 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements.
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.entitlement.bridge; + +public interface Java23EntitlementChecker extends EntitlementChecker {} diff --git a/libs/entitlement/bridge/src/main23/java/org/elasticsearch/entitlement/bridge/Java23EntitlementCheckerHandle.java b/libs/entitlement/bridge/src/main23/java/org/elasticsearch/entitlement/bridge/Java23EntitlementCheckerHandle.java new file mode 100644 index 0000000000000..f41c5dcdf14fd --- /dev/null +++ b/libs/entitlement/bridge/src/main23/java/org/elasticsearch/entitlement/bridge/Java23EntitlementCheckerHandle.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.entitlement.bridge; + +/** + * Java23 variant of {@link EntitlementChecker} handle holder. + */ +public class Java23EntitlementCheckerHandle { + + public static Java23EntitlementChecker instance() { + return Holder.instance; + } + + private static class Holder { + private static final Java23EntitlementChecker instance = HandleLoader.load(Java23EntitlementChecker.class); + } + + // no construction + private Java23EntitlementCheckerHandle() {} +} diff --git a/libs/entitlement/build.gradle b/libs/entitlement/build.gradle index 12e0bb48a54b7..841591873153c 100644 --- a/libs/entitlement/build.gradle +++ b/libs/entitlement/build.gradle @@ -6,10 +6,13 @@ * your election, the "Elastic License 2.0", the "GNU Affero General Public * License v3.0 only", or the "Server Side Public License, v 1". 
 */
+
+import org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask
+
 apply plugin: 'elasticsearch.build'
 apply plugin: 'elasticsearch.publish'
-
 apply plugin: 'elasticsearch.embedded-providers'
+apply plugin: 'elasticsearch.mrjar'

 embeddedProviders {
   impl 'entitlement', project(':libs:entitlement:asm-provider')
@@ -23,8 +26,13 @@ dependencies {
   testImplementation(project(":test:framework")) {
     exclude group: 'org.elasticsearch', module: 'entitlement'
   }
+
+  // guarding for intellij
+  if (sourceSets.findByName("main23")) {
+    main23CompileOnly project(path: ':libs:entitlement:bridge', configuration: 'java23')
+  }
 }

-tasks.named('forbiddenApisMain').configure {
+tasks.withType(CheckForbiddenApisTask).configureEach {
   replaceSignatureFiles 'jdk-signatures'
 }
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java
index 7f68457baea9e..01b8f4d574f90 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java
@@ -15,6 +15,7 @@
 import com.sun.tools.attach.VirtualMachine;

 import org.elasticsearch.core.SuppressForbidden;
+import org.elasticsearch.core.Tuple;
 import org.elasticsearch.entitlement.initialization.EntitlementInitialization;
 import org.elasticsearch.logging.LogManager;
 import org.elasticsearch.logging.Logger;
@@ -22,15 +23,33 @@
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.util.Collection;
+import java.util.Objects;
+import java.util.function.Function;

 public class EntitlementBootstrap {

+    public record BootstrapArgs(Collection<Tuple<Path, Boolean>> pluginData, Function<Class<?>, String> pluginResolver) {}
+
+    private static BootstrapArgs bootstrapArgs;
+
+    public static BootstrapArgs bootstrapArgs() {
+        return bootstrapArgs;
+    }
+
     /**
-     * Activates entitlement checking. Once this method returns, calls to forbidden methods
-     * will throw {@link org.elasticsearch.entitlement.runtime.api.NotEntitledException}.
+     * Activates entitlement checking. Once this method returns, calls to methods protected by Entitlements from classes without a valid
+     * policy will throw {@link org.elasticsearch.entitlement.runtime.api.NotEntitledException}.
+     * @param pluginData a collection of (plugin path, boolean) tuples that holds the paths of all the installed Elasticsearch modules and
+     *                   plugins, and whether each is Java modular or not.
+     * @param pluginResolver a functor to map a Java Class to the name of the plugin it belongs to.
     */
-    public static void bootstrap() {
+    public static void bootstrap(Collection<Tuple<Path, Boolean>> pluginData, Function<Class<?>, String> pluginResolver) {
         logger.debug("Loading entitlement agent");
+        if (EntitlementBootstrap.bootstrapArgs != null) {
+            throw new IllegalStateException("plugin data is already set");
+        }
+        EntitlementBootstrap.bootstrapArgs = new BootstrapArgs(Objects.requireNonNull(pluginData), Objects.requireNonNull(pluginResolver));
         exportInitializationToAgent();
         loadAgent(findAgentJar());
     }
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java
index 155d5a27c606b..0ffab5f93969f 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java
@@ -9,17 +9,37 @@

 package org.elasticsearch.entitlement.initialization;

+import org.elasticsearch.core.Tuple;
 import org.elasticsearch.core.internal.provider.ProviderLocator;
+import org.elasticsearch.entitlement.bootstrap.EntitlementBootstrap;
 import org.elasticsearch.entitlement.bridge.EntitlementChecker;
+import org.elasticsearch.entitlement.instrumentation.CheckMethod;
 import org.elasticsearch.entitlement.instrumentation.InstrumentationService;
 import org.elasticsearch.entitlement.instrumentation.MethodKey;
 import org.elasticsearch.entitlement.instrumentation.Transformer;
 import org.elasticsearch.entitlement.runtime.api.ElasticsearchEntitlementChecker;
+import org.elasticsearch.entitlement.runtime.policy.Policy;
+import org.elasticsearch.entitlement.runtime.policy.PolicyManager;
+import org.elasticsearch.entitlement.runtime.policy.PolicyParser;
+import org.elasticsearch.entitlement.runtime.policy.Scope;

+import java.io.IOException;
 import java.lang.instrument.Instrumentation;
-import java.lang.reflect.Method;
+import java.lang.module.ModuleFinder;
+import java.lang.module.ModuleReference;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.elasticsearch.entitlement.runtime.policy.PolicyManager.ALL_UNNAMED;

 /**
  * Called by the agent during {@code agentmain} to configure the entitlement system,
@@ -29,6 +49,9 @@
  * to begin injecting our instrumentation.
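[Editorial aside] For orientation, a hedged sketch of how startup code might invoke the new two-argument entry point; the paths and the resolvePluginName helper are assumptions for illustration, not part of this change:

    List<Tuple<Path, Boolean>> pluginData = List.of(
        Tuple.tuple(Path.of("modules/some-module"), true),        // Java modular
        Tuple.tuple(Path.of("plugins/some-legacy-plugin"), false) // non-modular
    );
    Function<Class<?>, String> pluginResolver = clazz -> resolvePluginName(clazz.getClassLoader()); // assumed helper
    EntitlementBootstrap.bootstrap(pluginData, pluginResolver);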
 */
 public class EntitlementInitialization {
+
+    private static final String POLICY_FILE_NAME = "entitlement-policy.yaml";
+
     private static ElasticsearchEntitlementChecker manager;

     // Note: referenced by bridge reflectively
@@ -38,20 +61,112 @@ public static EntitlementChecker checker() {

     // Note: referenced by agent reflectively
     public static void initialize(Instrumentation inst) throws Exception {
-        manager = new ElasticsearchEntitlementChecker();
+        manager = initChecker();
+
+        Map<MethodKey, CheckMethod> checkMethods = INSTRUMENTER_FACTORY.lookupMethodsToInstrument(
+            "org.elasticsearch.entitlement.bridge.EntitlementChecker"
+        );
+
+        var classesToTransform = checkMethods.keySet().stream().map(MethodKey::className).collect(Collectors.toSet());
+
+        inst.addTransformer(new Transformer(INSTRUMENTER_FACTORY.newInstrumenter(checkMethods), classesToTransform), true);
+        // TODO: should we limit this array somehow?
+        var classesToRetransform = classesToTransform.stream().map(EntitlementInitialization::internalNameToClass).toArray(Class<?>[]::new);
+        inst.retransformClasses(classesToRetransform);
+    }
+
+    private static Class<?> internalNameToClass(String internalName) {
+        try {
+            return Class.forName(internalName.replace('/', '.'), false, ClassLoader.getPlatformClassLoader());
+        } catch (ClassNotFoundException e) {
+            throw new RuntimeException(e);
+        }
+    }

-        // TODO: Configure actual entitlement grants instead of this hardcoded one
-        Method targetMethod = System.class.getMethod("exit", int.class);
-        Method instrumentationMethod = Class.forName("org.elasticsearch.entitlement.bridge.EntitlementChecker")
-            .getMethod("checkSystemExit", Class.class, int.class);
-        Map<MethodKey, Method> methodMap = Map.of(INSTRUMENTER_FACTORY.methodKeyForTarget(targetMethod), instrumentationMethod);
+    private static PolicyManager createPolicyManager() throws IOException {
+        Map<String, Policy> pluginPolicies = createPluginPolicies(EntitlementBootstrap.bootstrapArgs().pluginData());

-        inst.addTransformer(new Transformer(INSTRUMENTER_FACTORY.newInstrumenter("", methodMap), Set.of(internalName(System.class))), true);
-        inst.retransformClasses(System.class);
+        // TODO: What should the name be?
+        // TODO(ES-10031): Decide what goes in the elasticsearch default policy and extend it
+        var serverPolicy = new Policy("server", List.of());
+        return new PolicyManager(serverPolicy, pluginPolicies, EntitlementBootstrap.bootstrapArgs().pluginResolver());
     }

-    private static String internalName(Class<?> c) {
-        return c.getName().replace('.', '/');
+    private static Map<String, Policy> createPluginPolicies(Collection<Tuple<Path, Boolean>> pluginData) throws IOException {
+        Map<String, Policy> pluginPolicies = new HashMap<>(pluginData.size());
+        for (Tuple<Path, Boolean> entry : pluginData) {
+            Path pluginRoot = entry.v1();
+            boolean isModular = entry.v2();
+
+            String pluginName = pluginRoot.getFileName().toString();
+            final Policy policy = loadPluginPolicy(pluginRoot, isModular, pluginName);
+
+            pluginPolicies.put(pluginName, policy);
+        }
+        return pluginPolicies;
+    }
+
+    private static Policy loadPluginPolicy(Path pluginRoot, boolean isModular, String pluginName) throws IOException {
+        Path policyFile = pluginRoot.resolve(POLICY_FILE_NAME);
+
+        final Set<String> moduleNames = getModuleNames(pluginRoot, isModular);
+        final Policy policy = parsePolicyIfExists(pluginName, policyFile);
+
+        // TODO: should this check actually be part of the parser?
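+        // Illustrative example (editorial note, names assumed): a modular plugin whose
+        // module-info declares module "org.example.plugin" may only use that name as a
+        // policy scope; a scope such as "wrong.module" fails the check below with
+        // "policy [.../entitlement-policy.yaml] contains invalid module [wrong.module]".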
+        for (Scope scope : policy.scopes) {
+            if (moduleNames.contains(scope.name) == false) {
+                throw new IllegalStateException("policy [" + policyFile + "] contains invalid module [" + scope.name + "]");
+            }
+        }
+        return policy;
+    }
+
+    private static Policy parsePolicyIfExists(String pluginName, Path policyFile) throws IOException {
+        if (Files.exists(policyFile)) {
+            return new PolicyParser(Files.newInputStream(policyFile, StandardOpenOption.READ), pluginName).parsePolicy();
+        }
+        return new Policy(pluginName, List.of());
+    }
+
+    private static Set<String> getModuleNames(Path pluginRoot, boolean isModular) {
+        if (isModular) {
+            ModuleFinder moduleFinder = ModuleFinder.of(pluginRoot);
+            Set<ModuleReference> moduleReferences = moduleFinder.findAll();
+
+            return moduleReferences.stream().map(mr -> mr.descriptor().name()).collect(Collectors.toUnmodifiableSet());
+        }
+        // When isModular == false we use the same "ALL-UNNAMED" constant as the JDK to indicate (any) unnamed module for this plugin
+        return Set.of(ALL_UNNAMED);
+    }
+
+    private static ElasticsearchEntitlementChecker initChecker() throws IOException {
+        final PolicyManager policyManager = createPolicyManager();
+
+        int javaVersion = Runtime.version().feature();
+        final String classNamePrefix;
+        if (javaVersion >= 23) {
+            classNamePrefix = "Java23";
+        } else {
+            classNamePrefix = "";
+        }
+        final String className = "org.elasticsearch.entitlement.runtime.api." + classNamePrefix + "ElasticsearchEntitlementChecker";
+        Class<?> clazz;
+        try {
+            clazz = Class.forName(className);
+        } catch (ClassNotFoundException e) {
+            throw new AssertionError("entitlement lib cannot find entitlement impl", e);
+        }
+        Constructor<?> constructor;
+        try {
+            constructor = clazz.getConstructor(PolicyManager.class);
+        } catch (NoSuchMethodException e) {
+            throw new AssertionError("entitlement impl is missing the expected PolicyManager constructor", e);
+        }
+        try {
+            return (ElasticsearchEntitlementChecker) constructor.newInstance(policyManager);
+        } catch (IllegalAccessException | InvocationTargetException | InstantiationException e) {
+            throw new AssertionError(e);
+        }
     }

     private static final InstrumentationService INSTRUMENTER_FACTORY = new ProviderLocator<>(
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/instrumentation/CheckMethod.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/instrumentation/CheckMethod.java
new file mode 100644
index 0000000000000..384d455c7a34b
--- /dev/null
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/instrumentation/CheckMethod.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.instrumentation;
+
+import java.util.List;
+
+/**
+ * A structure to use as a representation of the checkXxx method the instrumentation will inject.
+ *
+ * @param className the "internal name" of the class: includes the package info, but with periods replaced by slashes
+ * @param methodName the checker method name
+ * @param parameterDescriptors a list of JVM type descriptors for the methodName parameters
+ */
+public record CheckMethod(String className, String methodName, List<String> parameterDescriptors) {}
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/instrumentation/InstrumentationService.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/instrumentation/InstrumentationService.java
index 25fa84ec7c4ba..d0331d756d2b2 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/instrumentation/InstrumentationService.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/instrumentation/InstrumentationService.java
@@ -9,17 +9,14 @@

 package org.elasticsearch.entitlement.instrumentation;

-import java.lang.reflect.Method;
+import java.io.IOException;
 import java.util.Map;

 /**
  * The SPI service entry point for instrumentation.
  */
 public interface InstrumentationService {
-    Instrumenter newInstrumenter(String classNameSuffix, Map<MethodKey, Method> instrumentationMethods);
+    Instrumenter newInstrumenter(Map<MethodKey, CheckMethod> checkMethods);

-    /**
-     * @return a {@link MethodKey} suitable for looking up the given {@code targetMethod} in the entitlements trampoline
-     */
-    MethodKey methodKeyForTarget(Method targetMethod);
+    Map<MethodKey, CheckMethod> lookupMethodsToInstrument(String entitlementCheckerClassName) throws ClassNotFoundException, IOException;
 }
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/instrumentation/MethodKey.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/instrumentation/MethodKey.java
index 54e09c10bcc57..256a4d709d9dc 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/instrumentation/MethodKey.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/instrumentation/MethodKey.java
@@ -12,7 +12,10 @@
 import java.util.List;

 /**
+ * A structure to use as a key/lookup for a method target of instrumentation
  *
- * @param className the "internal name" of the class: includes the package info, but with periods replaced by slashes
+ * @param className the "internal name" of the class: includes the package info, but with periods replaced by slashes
+ * @param methodName the method name
+ * @param parameterTypes a list of "internal names" for the parameter types
  */
-public record MethodKey(String className, String methodName, List<String> parameterTypes, boolean isStatic) {}
+public record MethodKey(String className, String methodName, List<String> parameterTypes) {}
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java
index 6d5dbd4098aa9..28a080470c043 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java
@@ -10,10 +10,11 @@
 package org.elasticsearch.entitlement.runtime.api;

 import org.elasticsearch.entitlement.bridge.EntitlementChecker;
-import org.elasticsearch.logging.LogManager;
-import org.elasticsearch.logging.Logger;
+import org.elasticsearch.entitlement.runtime.policy.FlagEntitlementType;
+import org.elasticsearch.entitlement.runtime.policy.PolicyManager;

-import java.util.Optional;
+import java.net.URL;
+import java.net.URLStreamHandlerFactory;

 /**
  * Implementation of the {@link EntitlementChecker} interface, providing additional
@@ -21,51 +22,45 @@
  * The trampoline module loads this object via SPI.
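[Editorial aside] To make the two record types concrete, a hedged example of the key/check pair for the System.exit instrumentation; the descriptor strings follow standard JVM notation, but the exact representation of primitives in the MethodKey internal-name list is an assumption for illustration:

    MethodKey target = new MethodKey("java/lang/System", "exit", List.of("I"));
    CheckMethod check = new CheckMethod(
        "org/elasticsearch/entitlement/bridge/EntitlementChecker",
        "check$java_lang_System$exit",
        List.of("Ljava/lang/Class;", "I")
    );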
  */
 public class ElasticsearchEntitlementChecker implements EntitlementChecker {
-    private static final Logger logger = LogManager.getLogger(ElasticsearchEntitlementChecker.class);
+    private final PolicyManager policyManager;
+
+    public ElasticsearchEntitlementChecker(PolicyManager policyManager) {
+        this.policyManager = policyManager;
+    }

     @Override
-    public void checkSystemExit(Class<?> callerClass, int status) {
-        var requestingModule = requestingModule(callerClass);
-        if (isTriviallyAllowed(requestingModule)) {
-            return;
-        }
-        // Hard-forbidden until we develop the permission granting scheme
-        throw new NotEntitledException("Missing entitlement for " + requestingModule);
+    public void check$java_lang_System$exit(Class<?> callerClass, int status) {
+        policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.SYSTEM_EXIT);
     }

-    private static Module requestingModule(Class<?> callerClass) {
-        if (callerClass != null) {
-            Module callerModule = callerClass.getModule();
-            if (callerModule.getLayer() != ModuleLayer.boot()) {
-                // fast path
-                return callerModule;
-            }
-        }
-        int framesToSkip = 1  // getCallingClass (this method)
-            + 1  // the checkXxx method
-            + 1  // the runtime config method
-            + 1  // the instrumented method
-        ;
-        Optional<Module> module = StackWalker.getInstance(StackWalker.Option.RETAIN_CLASS_REFERENCE)
-            .walk(
-                s -> s.skip(framesToSkip)
-                    .map(f -> f.getDeclaringClass().getModule())
-                    .filter(m -> m.getLayer() != ModuleLayer.boot())
-                    .findFirst()
-            );
-        return module.orElse(null);
+    @Override
+    public void check$java_net_URLClassLoader$(Class<?> callerClass, URL[] urls) {
+        policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER);
     }

-    private static boolean isTriviallyAllowed(Module requestingModule) {
-        if (requestingModule == null) {
-            logger.debug("Trivially allowed: Entire call stack is in the boot module layer");
-            return true;
-        }
-        if (requestingModule == System.class.getModule()) {
-            logger.debug("Trivially allowed: Caller is in {}", System.class.getModule().getName());
-            return true;
-        }
-        logger.trace("Not trivially allowed");
-        return false;
+    @Override
+    public void check$java_net_URLClassLoader$(Class<?> callerClass, URL[] urls, ClassLoader parent) {
+        policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER);
+    }
+
+    @Override
+    public void check$java_net_URLClassLoader$(Class<?> callerClass, URL[] urls, ClassLoader parent, URLStreamHandlerFactory factory) {
+        policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER);
+    }
+
+    @Override
+    public void check$java_net_URLClassLoader$(Class<?> callerClass, String name, URL[] urls, ClassLoader parent) {
+        policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER);
+    }
+
+    @Override
+    public void check$java_net_URLClassLoader$(
+        Class<?> callerClass,
+        String name,
+        URL[] urls,
+        ClassLoader parent,
+        URLStreamHandlerFactory factory
+    ) {
+        policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER);
     }
 }
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FlagEntitlementType.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FlagEntitlementType.java
new file mode 100644
index 0000000000000..d40235ee12166
--- /dev/null
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FlagEntitlementType.java
@@ -0,0 +1,15 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.runtime.policy;
+
+public enum FlagEntitlementType {
+    SYSTEM_EXIT,
+    CREATE_CLASSLOADER;
+}
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java
new file mode 100644
index 0000000000000..b3fb5b75a1d5a
--- /dev/null
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.runtime.policy;
+
+import org.elasticsearch.core.Strings;
+import org.elasticsearch.entitlement.runtime.api.NotEntitledException;
+import org.elasticsearch.logging.LogManager;
+import org.elasticsearch.logging.Logger;
+
+import java.lang.module.ModuleFinder;
+import java.lang.module.ModuleReference;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+public class PolicyManager {
+    private static final Logger logger = LogManager.getLogger(PolicyManager.class);
+
+    protected final Policy serverPolicy;
+    protected final Map<String, Policy> pluginPolicies;
+    private final Function<Class<?>, String> pluginResolver;
+
+    public static final String ALL_UNNAMED = "ALL-UNNAMED";
+
+    private static final Set<Module> systemModules = findSystemModules();
+
+    private static Set<Module> findSystemModules() {
+        var systemModulesDescriptors = ModuleFinder.ofSystem()
+            .findAll()
+            .stream()
+            .map(ModuleReference::descriptor)
+            .collect(Collectors.toUnmodifiableSet());
+
+        return ModuleLayer.boot()
+            .modules()
+            .stream()
+            .filter(m -> systemModulesDescriptors.contains(m.getDescriptor()))
+            .collect(Collectors.toUnmodifiableSet());
+    }
+
+    public PolicyManager(Policy defaultPolicy, Map<String, Policy> pluginPolicies, Function<Class<?>, String> pluginResolver) {
+        this.serverPolicy = Objects.requireNonNull(defaultPolicy);
+        this.pluginPolicies = Collections.unmodifiableMap(Objects.requireNonNull(pluginPolicies));
+        this.pluginResolver = pluginResolver;
+    }
+
+    public void checkFlagEntitlement(Class<?> callerClass, FlagEntitlementType type) {
+        var requestingModule = requestingModule(callerClass);
+        if (isTriviallyAllowed(requestingModule)) {
+            return;
+        }
+
+        // TODO: real policy check. For now, we only allow our hardcoded System.exit policy for server.
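+        // Illustrative outcome (editorial note, module name assumed): a plugin class in module
+        // "org.example.plugin" that calls new URLClassLoader(...) arrives here via
+        // check$java_net_URLClassLoader$ with type == CREATE_CLASSLOADER and, lacking a grant
+        // in the hardcoded rule below, falls through to the NotEntitledException at the end.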
+        // TODO: this will be checked using policies
+        if (requestingModule.isNamed()
+            && requestingModule.getName().equals("org.elasticsearch.server")
+            && (type == FlagEntitlementType.SYSTEM_EXIT || type == FlagEntitlementType.CREATE_CLASSLOADER)) {
+            logger.debug("Allowed: caller [{}] in module [{}] has entitlement [{}]", callerClass, requestingModule.getName(), type);
+            return;
+        }
+
+        // TODO: plugins policy check using pluginResolver and pluginPolicies
+        throw new NotEntitledException(
+            Strings.format("Missing entitlement [%s] for caller [%s] in module [%s]", type, callerClass, requestingModule.getName())
+        );
+    }
+
+    private static Module requestingModule(Class<?> callerClass) {
+        if (callerClass != null) {
+            Module callerModule = callerClass.getModule();
+            if (systemModules.contains(callerModule) == false) {
+                // fast path
+                return callerModule;
+            }
+        }
+        int framesToSkip = 1  // getCallingClass (this method)
+            + 1  // the checkXxx method
+            + 1  // the runtime config method
+            + 1  // the instrumented method
+        ;
+        Optional<Module> module = StackWalker.getInstance(StackWalker.Option.RETAIN_CLASS_REFERENCE)
+            .walk(
+                s -> s.skip(framesToSkip)
+                    .map(f -> f.getDeclaringClass().getModule())
+                    .filter(m -> systemModules.contains(m) == false)
+                    .findFirst()
+            );
+        return module.orElse(null);
+    }
+
+    private static boolean isTriviallyAllowed(Module requestingModule) {
+        if (requestingModule == null) {
+            logger.debug("Trivially allowed: the entire call stack is composed of classes in system modules");
+            return true;
+        }
+        logger.trace("Not trivially allowed");
+        return false;
+    }
+
+    @Override
+    public String toString() {
+        return "PolicyManager{" + "serverPolicy=" + serverPolicy + ", pluginPolicies=" + pluginPolicies + '}';
+    }
+}
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java
index 229ccec3b8b2c..ea6603af99925 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java
@@ -9,7 +9,6 @@

 package org.elasticsearch.entitlement.runtime.policy;

-import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.xcontent.XContentParserConfiguration;
 import org.elasticsearch.xcontent.yaml.YamlXContent;
@@ -31,8 +30,6 @@
  */
 public class PolicyParser {

-    protected static final ParseField ENTITLEMENTS_PARSEFIELD = new ParseField("entitlements");
-
     protected static final String entitlementPackageName = Entitlement.class.getPackage().getName();

     protected final XContentParser policyParser;
@@ -65,13 +62,6 @@ public Policy parsePolicy() {

     protected Scope parseScope(String scopeName) throws IOException {
         try {
-            if (policyParser.nextToken() != XContentParser.Token.START_OBJECT) {
-                throw newPolicyParserException(scopeName, "expected object [" + ENTITLEMENTS_PARSEFIELD.getPreferredName() + "]");
-            }
-            if (policyParser.nextToken() != XContentParser.Token.FIELD_NAME
-                || policyParser.currentName().equals(ENTITLEMENTS_PARSEFIELD.getPreferredName()) == false) {
-                throw newPolicyParserException(scopeName, "expected object [" + ENTITLEMENTS_PARSEFIELD.getPreferredName() + "]");
-            }
             if (policyParser.nextToken() != XContentParser.Token.START_ARRAY) {
                 throw newPolicyParserException(scopeName, "expected array of <entitlement type>");
             }
@@ -90,9 +80,6 @@ protected Scope parseScope(String scopeName) throws IOException {
                 throw newPolicyParserException(scopeName, "expected closing object");
             }
         }
-        if (policyParser.nextToken() != XContentParser.Token.END_OBJECT) {
-            throw newPolicyParserException(scopeName, "expected closing object");
-        }
         return new Scope(scopeName, entitlements);
     } catch (IOException ioe) {
         throw new UncheckedIOException(ioe);
diff --git a/libs/entitlement/src/main23/java/org/elasticsearch/entitlement/runtime/api/Java23ElasticsearchEntitlementChecker.java b/libs/entitlement/src/main23/java/org/elasticsearch/entitlement/runtime/api/Java23ElasticsearchEntitlementChecker.java
new file mode 100644
index 0000000000000..d0f9f4f48609c
--- /dev/null
+++ b/libs/entitlement/src/main23/java/org/elasticsearch/entitlement/runtime/api/Java23ElasticsearchEntitlementChecker.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.runtime.api;
+
+import org.elasticsearch.entitlement.bridge.Java23EntitlementChecker;
+import org.elasticsearch.entitlement.runtime.policy.PolicyManager;
+
+public class Java23ElasticsearchEntitlementChecker extends ElasticsearchEntitlementChecker implements Java23EntitlementChecker {
+
+    public Java23ElasticsearchEntitlementChecker(PolicyManager policyManager) {
+        super(policyManager);
+    }
+
+    @Override
+    public void check$java_lang_System$exit(Class<?> callerClass, int status) {
+        // TODO: this is just an example, we shouldn't really override a method implemented in the superclass
+        super.check$java_lang_System$exit(callerClass, status);
+    }
+}
diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java
index b21d206f3eb6a..de8280ea87fe5 100644
--- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java
+++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java
@@ -29,11 +29,10 @@ public void testParserSyntaxFailures() {
     public void testEntitlementDoesNotExist() throws IOException {
         PolicyParserException ppe = expectThrows(PolicyParserException.class, () -> new PolicyParser(new ByteArrayInputStream("""
             entitlement-module-name:
-              entitlements:
-                - does_not_exist: {}
+              - does_not_exist: {}
             """.getBytes(StandardCharsets.UTF_8)), "test-failure-policy.yaml").parsePolicy());
         assertEquals(
-            "[3:7] policy parsing error for [test-failure-policy.yaml] in scope [entitlement-module-name]: "
+            "[2:5] policy parsing error for [test-failure-policy.yaml] in scope [entitlement-module-name]: "
                 + "unknown entitlement type [does_not_exist]",
             ppe.getMessage()
         );
@@ -42,23 +41,21 @@ public void testEntitlementDoesNotExist() throws IOException {
     public void testEntitlementMissingParameter() throws IOException {
         PolicyParserException ppe = expectThrows(PolicyParserException.class, () -> new PolicyParser(new ByteArrayInputStream("""
             entitlement-module-name:
-              entitlements:
-                - file: {}
+              - file: {}
             """.getBytes(StandardCharsets.UTF_8)),
"test-failure-policy.yaml").parsePolicy()); assertEquals( - "[3:14] policy parsing error for [test-failure-policy.yaml] in scope [entitlement-module-name] " + "[2:12] policy parsing error for [test-failure-policy.yaml] in scope [entitlement-module-name] " + "for entitlement type [file]: missing entitlement parameter [path]", ppe.getMessage() ); ppe = expectThrows(PolicyParserException.class, () -> new PolicyParser(new ByteArrayInputStream(""" entitlement-module-name: - entitlements: - - file: - path: test-path + - file: + path: test-path """.getBytes(StandardCharsets.UTF_8)), "test-failure-policy.yaml").parsePolicy()); assertEquals( - "[5:1] policy parsing error for [test-failure-policy.yaml] in scope [entitlement-module-name] " + "[4:1] policy parsing error for [test-failure-policy.yaml] in scope [entitlement-module-name] " + "for entitlement type [file]: missing entitlement parameter [actions]", ppe.getMessage() ); @@ -67,15 +64,14 @@ public void testEntitlementMissingParameter() throws IOException { public void testEntitlementExtraneousParameter() throws IOException { PolicyParserException ppe = expectThrows(PolicyParserException.class, () -> new PolicyParser(new ByteArrayInputStream(""" entitlement-module-name: - entitlements: - - file: - path: test-path - actions: - - read - extra: test + - file: + path: test-path + actions: + - read + extra: test """.getBytes(StandardCharsets.UTF_8)), "test-failure-policy.yaml").parsePolicy()); assertEquals( - "[8:1] policy parsing error for [test-failure-policy.yaml] in scope [entitlement-module-name] " + "[7:1] policy parsing error for [test-failure-policy.yaml] in scope [entitlement-module-name] " + "for entitlement type [file]: extraneous entitlement parameter(s) {extra=test}", ppe.getMessage() ); diff --git a/libs/entitlement/src/test/resources/org/elasticsearch/entitlement/runtime/policy/test-policy.yaml b/libs/entitlement/src/test/resources/org/elasticsearch/entitlement/runtime/policy/test-policy.yaml index b58287cfc83b7..f13f574535bec 100644 --- a/libs/entitlement/src/test/resources/org/elasticsearch/entitlement/runtime/policy/test-policy.yaml +++ b/libs/entitlement/src/test/resources/org/elasticsearch/entitlement/runtime/policy/test-policy.yaml @@ -1,7 +1,6 @@ entitlement-module-name: - entitlements: - - file: - path: "test/path/to/file" - actions: - - "read" - - "write" + - file: + path: "test/path/to/file" + actions: + - "read" + - "write" diff --git a/libs/entitlement/tools/common/build.gradle b/libs/entitlement/tools/common/build.gradle index 3373a8f747430..89772b4132c5f 100644 --- a/libs/entitlement/tools/common/build.gradle +++ b/libs/entitlement/tools/common/build.gradle @@ -7,9 +7,8 @@ * License v3.0 only", or the "Server Side Public License, v 1". */ -plugins { - id 'java' -} - -group = 'org.elasticsearch.entitlement.tools' +apply plugin: 'elasticsearch.build' +tasks.named('forbiddenApisMain').configure { + replaceSignatureFiles 'jdk-signatures' +} diff --git a/libs/h3/src/main/java/org/elasticsearch/h3/FaceIJK.java b/libs/h3/src/main/java/org/elasticsearch/h3/FaceIJK.java index 866fdfe8a7f8b..a5744ed5eb6bc 100644 --- a/libs/h3/src/main/java/org/elasticsearch/h3/FaceIJK.java +++ b/libs/h3/src/main/java/org/elasticsearch/h3/FaceIJK.java @@ -417,43 +417,64 @@ public LatLng faceIjkToGeo(int res) { * for this FaceIJK address at a specified resolution. * * @param res The H3 resolution of the cell. - * @param start The first topological vertex to return. - * @param length The number of topological vertexes to return. 
*/ - public CellBoundary faceIjkPentToCellBoundary(int res, int start, int length) { + public CellBoundary faceIjkPentToCellBoundary(int res) { // adjust the center point to be in an aperture 33r substrate grid // these should be composed for speed this.coord.downAp3(); this.coord.downAp3r(); // if res is Class III we need to add a cw aperture 7 to get to // icosahedral Class II - int adjRes = res; - if (H3Index.isResolutionClassIII(res)) { - this.coord.downAp7r(); - adjRes += 1; - } + final int adjRes = adjustRes(this.coord, res); + // If we're returning the entire loop, we need one more iteration in case // of a distortion vertex on the last edge - final int additionalIteration = length == Constants.NUM_PENT_VERTS ? 1 : 0; - final boolean isResolutionClassIII = H3Index.isResolutionClassIII(res); - // convert each vertex to lat/lng - // adjust the face of each vertex as appropriate and introduce - // edge-crossing vertices as needed + if (H3Index.isResolutionClassIII(res)) { + return faceIjkPentToCellBoundaryClassIII(adjRes); + } else { + return faceIjkPentToCellBoundaryClassII(adjRes); + } + } + + private CellBoundary faceIjkPentToCellBoundaryClassII(int adjRes) { + final LatLng[] points = new LatLng[Constants.NUM_PENT_VERTS]; + final FaceIJK fijk = new FaceIJK(this.face, new CoordIJK(0, 0, 0)); + for (int vert = 0; vert < Constants.NUM_PENT_VERTS; vert++) { + // The center point is now in the same substrate grid as the origin + // cell vertices. Add the center point substate coordinates + // to each vertex to translate the vertices to that cell. + fijk.coord.reset( + VERTEX_CLASSII[vert][0] + this.coord.i, + VERTEX_CLASSII[vert][1] + this.coord.j, + VERTEX_CLASSII[vert][2] + this.coord.k + ); + fijk.coord.ijkNormalize(); + fijk.face = this.face; + + fijk.adjustPentVertOverage(adjRes); + + points[vert] = fijk.coord.ijkToGeo(fijk.face, adjRes, true); + } + return new CellBoundary(points, Constants.NUM_PENT_VERTS); + } + + private CellBoundary faceIjkPentToCellBoundaryClassIII(int adjRes) { final LatLng[] points = new LatLng[CellBoundary.MAX_CELL_BNDRY_VERTS]; int numPoints = 0; - final CoordIJK scratch = new CoordIJK(0, 0, 0); - final FaceIJK fijk = new FaceIJK(this.face, scratch); - final int[][] coord = isResolutionClassIII ? VERTEX_CLASSIII : VERTEX_CLASSII; + final FaceIJK fijk = new FaceIJK(this.face, new CoordIJK(0, 0, 0)); final CoordIJK lastCoord = new CoordIJK(0, 0, 0); int lastFace = this.face; - for (int vert = start; vert < start + length + additionalIteration; vert++) { + for (int vert = 0; vert < Constants.NUM_PENT_VERTS + 1; vert++) { final int v = vert % Constants.NUM_PENT_VERTS; // The center point is now in the same substrate grid as the origin // cell vertices. Add the center point substate coordinates // to each vertex to translate the vertices to that cell. 
- scratch.reset(coord[v][0], coord[v][1], coord[v][2]); - scratch.ijkAdd(this.coord.i, this.coord.j, this.coord.k); - scratch.ijkNormalize(); + fijk.coord.reset( + VERTEX_CLASSIII[v][0] + this.coord.i, + VERTEX_CLASSIII[v][1] + this.coord.j, + VERTEX_CLASSIII[v][2] + this.coord.k + ); + fijk.coord.ijkNormalize(); fijk.face = this.face; fijk.adjustPentVertOverage(adjRes); @@ -461,7 +482,7 @@ public CellBoundary faceIjkPentToCellBoundary(int res, int start, int length) { // all Class III pentagon edges cross icosa edges // note that Class II pentagons have vertices on the edge, // not edge intersections - if (isResolutionClassIII && vert > start) { + if (vert > 0) { // find hex2d of the two vertexes on the last face final Vec2d orig2d0 = lastCoord.ijkToHex2d(); @@ -480,35 +501,17 @@ public CellBoundary faceIjkPentToCellBoundary(int res, int start, int length) { final Vec2d orig2d1 = lastCoord.ijkToHex2d(); - // find the appropriate icosa face edge vertexes - final Vec2d edge0; - final Vec2d edge1; - switch (adjacentFaceDir[fijkOrient.face][fijk.face]) { - case IJ -> { - edge0 = maxDimByCIIVec2d[adjRes][0]; - edge1 = maxDimByCIIVec2d[adjRes][1]; - } - case JK -> { - edge0 = maxDimByCIIVec2d[adjRes][1]; - edge1 = maxDimByCIIVec2d[adjRes][2]; - } - // case KI: - default -> { - assert (adjacentFaceDir[fijkOrient.face][fijk.face] == KI); - edge0 = maxDimByCIIVec2d[adjRes][2]; - edge1 = maxDimByCIIVec2d[adjRes][0]; - } - } - // find the intersection and add the lat/lng point to the result - final Vec2d inter = Vec2d.v2dIntersect(orig2d0, orig2d1, edge0, edge1); - points[numPoints++] = inter.hex2dToGeo(fijkOrient.face, adjRes, true); + final Vec2d inter = findIntersectionPoint(orig2d0, orig2d1, adjRes, adjacentFaceDir[fijkOrient.face][fijk.face]); + if (inter != null) { + points[numPoints++] = inter.hex2dToGeo(fijkOrient.face, adjRes, true); + } } // convert vertex to lat/lng and add to the result // vert == start + NUM_PENT_VERTS is only used to test for possible // intersection on last edge - if (vert < start + Constants.NUM_PENT_VERTS) { + if (vert < Constants.NUM_PENT_VERTS) { points[numPoints++] = fijk.coord.ijkToGeo(fijk.face, adjRes, true); } lastFace = fijk.face; @@ -522,10 +525,8 @@ public CellBoundary faceIjkPentToCellBoundary(int res, int start, int length) { * FaceIJK address at a specified resolution. * * @param res The H3 resolution of the cell. - * @param start The first topological vertex to return. - * @param length The number of topological vertexes to return. */ - public CellBoundary faceIjkToCellBoundary(final int res, final int start, final int length) { + public CellBoundary faceIjkToCellBoundary(final int res) { // adjust the center point to be in an aperture 33r substrate grid // these should be composed for speed this.coord.downAp3(); @@ -533,32 +534,63 @@ public CellBoundary faceIjkToCellBoundary(final int res, final int start, final // if res is Class III we need to add a cw aperture 7 to get to // icosahedral Class II - int adjRes = res; - if (H3Index.isResolutionClassIII(res)) { - this.coord.downAp7r(); - adjRes += 1; - } + final int adjRes = adjustRes(this.coord, res); - // If we're returning the entire loop, we need one more iteration in case - // of a distortion vertex on the last edge - final int additionalIteration = length == Constants.NUM_HEX_VERTS ? 
1 : 0; - final boolean isResolutionClassIII = H3Index.isResolutionClassIII(res); // convert each vertex to lat/lng // adjust the face of each vertex as appropriate and introduce // edge-crossing vertices as needed + if (H3Index.isResolutionClassIII(res)) { + return faceIjkToCellBoundaryClassIII(adjRes); + } else { + return faceIjkToCellBoundaryClassII(adjRes); + } + } + + private static int adjustRes(CoordIJK coord, int res) { + if (H3Index.isResolutionClassIII(res)) { + coord.downAp7r(); + res += 1; + } + return res; + } + + private CellBoundary faceIjkToCellBoundaryClassII(int adjRes) { + final LatLng[] points = new LatLng[Constants.NUM_HEX_VERTS]; + final FaceIJK fijk = new FaceIJK(this.face, new CoordIJK(0, 0, 0)); + for (int vert = 0; vert < Constants.NUM_HEX_VERTS; vert++) { + fijk.coord.reset( + VERTEX_CLASSII[vert][0] + this.coord.i, + VERTEX_CLASSII[vert][1] + this.coord.j, + VERTEX_CLASSII[vert][2] + this.coord.k + ); + fijk.coord.ijkNormalize(); + fijk.face = this.face; + + fijk.adjustOverageClassII(adjRes, false, true); + + // convert vertex to lat/lng and add to the result + // vert == start + NUM_HEX_VERTS is only used to test for possible + // intersection on last edge + points[vert] = fijk.coord.ijkToGeo(fijk.face, adjRes, true); + } + return new CellBoundary(points, Constants.NUM_HEX_VERTS); + } + + private CellBoundary faceIjkToCellBoundaryClassIII(int adjRes) { final LatLng[] points = new LatLng[CellBoundary.MAX_CELL_BNDRY_VERTS]; int numPoints = 0; - final CoordIJK scratch1 = new CoordIJK(0, 0, 0); - final FaceIJK fijk = new FaceIJK(this.face, scratch1); - final CoordIJK scratch2 = isResolutionClassIII ? new CoordIJK(0, 0, 0) : null; - final int[][] verts = isResolutionClassIII ? VERTEX_CLASSIII : VERTEX_CLASSII; + final FaceIJK fijk = new FaceIJK(this.face, new CoordIJK(0, 0, 0)); + final CoordIJK scratch = new CoordIJK(0, 0, 0); int lastFace = -1; Overage lastOverage = Overage.NO_OVERAGE; - for (int vert = start; vert < start + length + additionalIteration; vert++) { - int v = vert % Constants.NUM_HEX_VERTS; - scratch1.reset(verts[v][0], verts[v][1], verts[v][2]); - scratch1.ijkAdd(this.coord.i, this.coord.j, this.coord.k); - scratch1.ijkNormalize(); + for (int vert = 0; vert < Constants.NUM_HEX_VERTS + 1; vert++) { + final int v = vert % Constants.NUM_HEX_VERTS; + fijk.coord.reset( + VERTEX_CLASSIII[v][0] + this.coord.i, + VERTEX_CLASSIII[v][1] + this.coord.j, + VERTEX_CLASSIII[v][2] + this.coord.k + ); + fijk.coord.ijkNormalize(); fijk.face = this.face; final Overage overage = fijk.adjustOverageClassII(adjRes, false, true); @@ -572,50 +604,20 @@ public CellBoundary faceIjkToCellBoundary(final int res, final int start, final projection. Note that Class II cell edges have vertices on the face edge, with no edge line intersections. */ - if (isResolutionClassIII && vert > start && fijk.face != lastFace && lastOverage != Overage.FACE_EDGE) { + if (vert > 0 && fijk.face != lastFace && lastOverage != Overage.FACE_EDGE) { // find hex2d of the two vertexes on original face final int lastV = (v + 5) % Constants.NUM_HEX_VERTS; // The center point is now in the same substrate grid as the origin // cell vertices. Add the center point substate coordinates // to each vertex to translate the vertices to that cell. 
- final int[] vertexLast = verts[lastV]; - final int[] vertexV = verts[v]; - scratch2.reset(vertexLast[0] + this.coord.i, vertexLast[1] + this.coord.j, vertexLast[2] + this.coord.k); - scratch2.ijkNormalize(); - final Vec2d orig2d0 = scratch2.ijkToHex2d(); - scratch2.reset(vertexV[0] + this.coord.i, vertexV[1] + this.coord.j, vertexV[2] + this.coord.k); - scratch2.ijkNormalize(); - final Vec2d orig2d1 = scratch2.ijkToHex2d(); + final Vec2d orig2d0 = orig(scratch, VERTEX_CLASSIII[lastV]); + final Vec2d orig2d1 = orig(scratch, VERTEX_CLASSIII[v]); // find the appropriate icosa face edge vertexes final int face2 = ((lastFace == this.face) ? fijk.face : lastFace); - final Vec2d edge0; - final Vec2d edge1; - switch (adjacentFaceDir[this.face][face2]) { - case IJ -> { - edge0 = maxDimByCIIVec2d[adjRes][0]; - edge1 = maxDimByCIIVec2d[adjRes][1]; - } - case JK -> { - edge0 = maxDimByCIIVec2d[adjRes][1]; - edge1 = maxDimByCIIVec2d[adjRes][2]; - } - // case KI: - default -> { - assert (adjacentFaceDir[this.face][face2] == KI); - edge0 = maxDimByCIIVec2d[adjRes][2]; - edge1 = maxDimByCIIVec2d[adjRes][0]; - } - } // find the intersection and add the lat/lng point to the result - final Vec2d inter = Vec2d.v2dIntersect(orig2d0, orig2d1, edge0, edge1); - /* - If a point of intersection occurs at a hexagon vertex, then each - adjacent hexagon edge will lie completely on a single icosahedron - face, and no additional vertex is required. - */ - final boolean isIntersectionAtVertex = orig2d0.numericallyIdentical(inter) || orig2d1.numericallyIdentical(inter); - if (isIntersectionAtVertex == false) { + final Vec2d inter = findIntersectionPoint(orig2d0, orig2d1, adjRes, adjacentFaceDir[this.face][face2]); + if (inter != null) { points[numPoints++] = inter.hex2dToGeo(this.face, adjRes, true); } } @@ -623,7 +625,7 @@ public CellBoundary faceIjkToCellBoundary(final int res, final int start, final // convert vertex to lat/lng and add to the result // vert == start + NUM_HEX_VERTS is only used to test for possible // intersection on last edge - if (vert < start + Constants.NUM_HEX_VERTS) { + if (vert < Constants.NUM_HEX_VERTS) { points[numPoints++] = fijk.coord.ijkToGeo(fijk.face, adjRes, true); } lastFace = fijk.face; @@ -632,6 +634,42 @@ public CellBoundary faceIjkToCellBoundary(final int res, final int start, final return new CellBoundary(points, numPoints); } + private Vec2d orig(CoordIJK scratch, int[] vertexLast) { + scratch.reset(vertexLast[0] + this.coord.i, vertexLast[1] + this.coord.j, vertexLast[2] + this.coord.k); + scratch.ijkNormalize(); + return scratch.ijkToHex2d(); + } + + private Vec2d findIntersectionPoint(Vec2d orig2d0, Vec2d orig2d1, int adjRes, int faceDir) { + // find the appropriate icosa face edge vertexes + final Vec2d edge0; + final Vec2d edge1; + switch (faceDir) { + case IJ -> { + edge0 = maxDimByCIIVec2d[adjRes][0]; + edge1 = maxDimByCIIVec2d[adjRes][1]; + } + case JK -> { + edge0 = maxDimByCIIVec2d[adjRes][1]; + edge1 = maxDimByCIIVec2d[adjRes][2]; + } + // case KI: + default -> { + assert (faceDir == KI); + edge0 = maxDimByCIIVec2d[adjRes][2]; + edge1 = maxDimByCIIVec2d[adjRes][0]; + } + } + // find the intersection and add the lat/lng point to the result + final Vec2d inter = Vec2d.v2dIntersect(orig2d0, orig2d1, edge0, edge1); + /* + If a point of intersection occurs at a hexagon vertex, then each + adjacent hexagon edge will lie completely on a single icosahedron + face, and no additional vertex is required. 
+ */ + return orig2d0.numericallyIdentical(inter) || orig2d1.numericallyIdentical(inter) ? null : inter; + } + /** * compute the corresponding H3Index. * @param res The cell resolution. @@ -651,7 +689,6 @@ static long faceIjkToH3(int res, int face, CoordIJK coord) { // out of range input throw new IllegalArgumentException(" out of range input"); } - return H3Index.H3_set_base_cell(h, BaseCells.getBaseCell(face, coord)); } diff --git a/libs/h3/src/main/java/org/elasticsearch/h3/H3.java b/libs/h3/src/main/java/org/elasticsearch/h3/H3.java index 8c0bba62cecdb..08031088728ba 100644 --- a/libs/h3/src/main/java/org/elasticsearch/h3/H3.java +++ b/libs/h3/src/main/java/org/elasticsearch/h3/H3.java @@ -174,11 +174,11 @@ public static LatLng h3ToLatLng(String h3Address) { * Find the cell {@link CellBoundary} coordinates for the cell */ public static CellBoundary h3ToGeoBoundary(long h3) { - FaceIJK fijk = H3Index.h3ToFaceIjk(h3); + final FaceIJK fijk = H3Index.h3ToFaceIjk(h3); if (H3Index.H3_is_pentagon(h3)) { - return fijk.faceIjkPentToCellBoundary(H3Index.H3_get_resolution(h3), 0, Constants.NUM_PENT_VERTS); + return fijk.faceIjkPentToCellBoundary(H3Index.H3_get_resolution(h3)); } else { - return fijk.faceIjkToCellBoundary(H3Index.H3_get_resolution(h3), 0, Constants.NUM_HEX_VERTS); + return fijk.faceIjkToCellBoundary(H3Index.H3_get_resolution(h3)); } } diff --git a/libs/logstash-bridge/src/main/java/org/elasticsearch/logstashbridge/ingest/PipelineConfigurationBridge.java b/libs/logstash-bridge/src/main/java/org/elasticsearch/logstashbridge/ingest/PipelineConfigurationBridge.java index e146b06fe3f53..cb90d10665659 100644 --- a/libs/logstash-bridge/src/main/java/org/elasticsearch/logstashbridge/ingest/PipelineConfigurationBridge.java +++ b/libs/logstash-bridge/src/main/java/org/elasticsearch/logstashbridge/ingest/PipelineConfigurationBridge.java @@ -28,8 +28,12 @@ public String getId() { return delegate.getId(); } - public Map getConfigAsMap() { - return delegate.getConfigAsMap(); + public Map getConfig() { + return delegate.getConfig(); + } + + public Map getConfig(final boolean unmodifiable) { + return delegate.getConfig(unmodifiable); } @Override diff --git a/libs/plugin-analysis-api/build.gradle b/libs/plugin-analysis-api/build.gradle index 3f1670d76a0c1..41fbbdbafe998 100644 --- a/libs/plugin-analysis-api/build.gradle +++ b/libs/plugin-analysis-api/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.internal.info.BuildParams - /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the "Elastic License diff --git a/libs/secure-sm/src/main/java/org/elasticsearch/secure_sm/SecureSM.java b/libs/secure-sm/src/main/java/org/elasticsearch/secure_sm/SecureSM.java index 4fd471c529e75..02d0491118dc7 100644 --- a/libs/secure-sm/src/main/java/org/elasticsearch/secure_sm/SecureSM.java +++ b/libs/secure-sm/src/main/java/org/elasticsearch/secure_sm/SecureSM.java @@ -157,7 +157,9 @@ private static void debugThreadGroups(final ThreadGroup caller, final ThreadGrou // Returns true if the given thread is an instance of the JDK's InnocuousThread. 
private static boolean isInnocuousThread(Thread t) { final Class<?> c = t.getClass(); - return c.getModule() == Object.class.getModule() && c.getName().equals("jdk.internal.misc.InnocuousThread"); + return c.getModule() == Object.class.getModule() + && (c.getName().equals("jdk.internal.misc.InnocuousThread") + || c.getName().equals("java.util.concurrent.ForkJoinWorkerThread$InnocuousForkJoinWorkerThread")); } protected void checkThreadAccess(Thread t) { @@ -184,11 +186,21 @@ protected void checkThreadAccess(Thread t) { private static final Permission MODIFY_THREADGROUP_PERMISSION = new RuntimePermission("modifyThreadGroup"); private static final Permission MODIFY_ARBITRARY_THREADGROUP_PERMISSION = new ThreadPermission("modifyArbitraryThreadGroup"); + // Returns true if the given thread group is the JDK's innocuous ForkJoinPool worker thread group. + private static boolean isInnocuousThreadGroup(ThreadGroup t) { + final Class<?> c = t.getClass(); + return c.getModule() == Object.class.getModule() && t.getName().equals("InnocuousForkJoinWorkerThreadGroup"); + } + protected void checkThreadGroupAccess(ThreadGroup g) { Objects.requireNonNull(g); + boolean targetThreadGroupIsInnocuous = isInnocuousThreadGroup(g); + // first, check if we can modify thread groups at all. - checkPermission(MODIFY_THREADGROUP_PERMISSION); + if (targetThreadGroupIsInnocuous == false) { + checkPermission(MODIFY_THREADGROUP_PERMISSION); + } // check the threadgroup, if its our thread group or an ancestor, its fine. final ThreadGroup source = Thread.currentThread().getThreadGroup(); @@ -196,7 +208,7 @@ protected void checkThreadGroupAccess(ThreadGroup g) { if (source == null) { return; // we are a dead thread, do nothing - } else if (source.parentOf(target) == false) { + } else if (source.parentOf(target) == false && targetThreadGroupIsInnocuous == false) { checkPermission(MODIFY_ARBITRARY_THREADGROUP_PERMISSION); } } diff --git a/libs/secure-sm/src/test/java/org/elasticsearch/secure_sm/SecureSMTests.java b/libs/secure-sm/src/test/java/org/elasticsearch/secure_sm/SecureSMTests.java index b94639414ffe5..69c6973f57cdf 100644 --- a/libs/secure-sm/src/test/java/org/elasticsearch/secure_sm/SecureSMTests.java +++ b/libs/secure-sm/src/test/java/org/elasticsearch/secure_sm/SecureSMTests.java @@ -14,7 +14,10 @@ import java.security.Permission; import java.security.Policy; import java.security.ProtectionDomain; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; /** Simple tests for SecureSM */ public class SecureSMTests extends TestCase { @@ -128,4 +131,12 @@ public void run() { t1.join(); assertTrue(interrupted1.get()); } + + public void testParallelStreamThreadGroup() throws Exception { + List<Integer> list = new ArrayList<>(); + for (int i = 0; i < 100; ++i) { + list.add(i); + } + list.parallelStream().collect(Collectors.toSet()); + } } diff --git a/libs/simdvec/build.gradle b/libs/simdvec/build.gradle index 02f960130e690..95b8ddf28cf2f 100644 --- a/libs/simdvec/build.gradle +++ b/libs/simdvec/build.gradle @@ -7,7 +7,6 @@ * License v3.0 only", or the "Server Side Public License, v 1".
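The new testParallelStreamThreadGroup test exercises this path because parallel streams run on the common ForkJoinPool whose workers, when a SecurityManager is installed, are InnocuousForkJoinWorkerThreads living in the JDK's "InnocuousForkJoinWorkerThreadGroup", and spawning them triggers exactly the checkThreadGroupAccess call relaxed above. A minimal probe (illustrative only; the printed names depend on the JDK and on the security configuration):

import java.util.concurrent.ForkJoinPool;

public class CommonPoolProbe {
    public static void main(String[] args) {
        ForkJoinPool.commonPool().submit(() -> {
            Thread t = Thread.currentThread();
            // prints the worker class and its thread group, e.g. the innocuous
            // variants when a SecurityManager is active
            System.out.println(t.getClass().getName() + " in group " + t.getThreadGroup().getName());
        }).join();
    }
}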
*/ -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask apply plugin: 'elasticsearch.publish' @@ -33,7 +32,7 @@ tasks.matching { it.name == "compileMain21Java" }.configureEach { } tasks.named('test').configure { - if (BuildParams.getRuntimeJavaVersion().majorVersion.toInteger() >= 21) { + if (buildParams.getRuntimeJavaVersion().map{ it.majorVersion.toInteger() }.get() >= 21) { jvmArgs '--add-modules=jdk.incubator.vector' } } diff --git a/libs/simdvec/src/main/java/org/elasticsearch/simdvec/ESVectorUtil.java b/libs/simdvec/src/main/java/org/elasticsearch/simdvec/ESVectorUtil.java index de2cb9042610b..2f4743a47a14a 100644 --- a/libs/simdvec/src/main/java/org/elasticsearch/simdvec/ESVectorUtil.java +++ b/libs/simdvec/src/main/java/org/elasticsearch/simdvec/ESVectorUtil.java @@ -51,6 +51,8 @@ public static long ipByteBinByte(byte[] q, byte[] d) { /** * Compute the inner product of two vectors, where the query vector is a byte vector and the document vector is a bit vector. * This will return the sum of the query vector values using the document vector as a mask. + * When comparing the bits with the bytes, they are done in "big endian" order. For example, if the byte vector + * is [1, 2, 3, 4, 5, 6, 7, 8] and the bit vector is [0b10000000], the inner product will be 1.0. * @param q the query vector * @param d the document vector * @return the inner product of the two vectors @@ -63,9 +65,9 @@ public static int ipByteBit(byte[] q, byte[] d) { // now combine the two vectors, summing the byte dimensions where the bit in d is `1` for (int i = 0; i < d.length; i++) { byte mask = d[i]; - for (int j = 0; j < Byte.SIZE; j++) { + for (int j = Byte.SIZE - 1; j >= 0; j--) { if ((mask & (1 << j)) != 0) { - result += q[i * Byte.SIZE + j]; + result += q[i * Byte.SIZE + Byte.SIZE - 1 - j]; } } } @@ -75,6 +77,8 @@ public static int ipByteBit(byte[] q, byte[] d) { /** * Compute the inner product of two vectors, where the query vector is a float vector and the document vector is a bit vector. * This will return the sum of the query vector values using the document vector as a mask. + * When comparing the bits with the floats, they are done in "big endian" order. For example, if the float vector + * is [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0] and the bit vector is [0b10000000], the inner product will be 1.0. 
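A worked check of the big-endian pairing described in these javadocs: the most significant bit of d[i] masks q[i * 8] and the least significant bit masks q[i * 8 + 7]. The loop below mirrors the new implementation; the sample values are hypothetical:

public class BigEndianBitDot {
    static int ipByteBit(byte[] q, byte[] d) {
        int result = 0;
        for (int i = 0; i < d.length; i++) {
            byte mask = d[i];
            // walk bits from most to least significant, mapping bit j
            // to the byte dimension at offset (Byte.SIZE - 1 - j)
            for (int j = Byte.SIZE - 1; j >= 0; j--) {
                if ((mask & (1 << j)) != 0) {
                    result += q[i * Byte.SIZE + (Byte.SIZE - 1 - j)];
                }
            }
        }
        return result;
    }

    public static void main(String[] args) {
        byte[] q = { 1, 2, 3, 4, 5, 6, 7, 8 };
        byte[] d = { (byte) 0b10000000 };
        System.out.println(ipByteBit(q, d)); // prints 1: only the MSB is set, selecting q[0]
    }
}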
* @param q the query vector * @param d the document vector * @return the inner product of the two vectors @@ -86,9 +90,9 @@ public static float ipFloatBit(float[] q, byte[] d) { float result = 0; for (int i = 0; i < d.length; i++) { byte mask = d[i]; - for (int j = 0; j < Byte.SIZE; j++) { + for (int j = Byte.SIZE - 1; j >= 0; j--) { if ((mask & (1 << j)) != 0) { - result += q[i * Byte.SIZE + j]; + result += q[i * Byte.SIZE + Byte.SIZE - 1 - j]; } } } diff --git a/libs/simdvec/src/test/java/org/elasticsearch/simdvec/ESVectorUtilTests.java b/libs/simdvec/src/test/java/org/elasticsearch/simdvec/ESVectorUtilTests.java index e9e0fd58f7638..368898b934c87 100644 --- a/libs/simdvec/src/test/java/org/elasticsearch/simdvec/ESVectorUtilTests.java +++ b/libs/simdvec/src/test/java/org/elasticsearch/simdvec/ESVectorUtilTests.java @@ -21,6 +21,22 @@ public class ESVectorUtilTests extends BaseVectorizationTests { static final ESVectorizationProvider defaultedProvider = BaseVectorizationTests.defaultProvider(); static final ESVectorizationProvider defOrPanamaProvider = BaseVectorizationTests.maybePanamaProvider(); + public void testIpByteBit() { + byte[] q = new byte[16]; + byte[] d = new byte[] { (byte) Integer.parseInt("01100010", 2), (byte) Integer.parseInt("10100111", 2) }; + random().nextBytes(q); + int expected = q[1] + q[2] + q[6] + q[8] + q[10] + q[13] + q[14] + q[15]; + assertEquals(expected, ESVectorUtil.ipByteBit(q, d)); + } + + public void testIpFloatBit() { + float[] q = new float[16]; + byte[] d = new byte[] { (byte) Integer.parseInt("01100010", 2), (byte) Integer.parseInt("10100111", 2) }; + for (int i = 0; i < q.length; i++) { + q[i] = random().nextFloat(); + } + float expected = q[1] + q[2] + q[6] + q[8] + q[10] + q[13] + q[14] + q[15]; + assertEquals(expected, ESVectorUtil.ipFloatBit(q, d), 1e-6); + } + public void testBitAndCount() { testBasicBitAndImpl(ESVectorUtil::andBitCountLong); } diff --git a/modules/aggregations/build.gradle b/modules/aggregations/build.gradle index a1ab6363166cb..94fdddf6d711a 100644 --- a/modules/aggregations/build.gradle +++ b/modules/aggregations/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.internal.info.BuildParams - /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. 
Licensed under the "Elastic License @@ -20,7 +18,7 @@ esplugin { restResources { restApi { - include '_common', 'indices', 'cluster', 'index', 'search', 'nodes', 'bulk', 'scripts_painless_execute', 'put_script' + include 'capabilities', '_common', 'indices', 'cluster', 'index', 'search', 'nodes', 'bulk', 'scripts_painless_execute', 'put_script' } restTests { // Pulls in all aggregation tests from core AND the forwards v7's core for forwards compatibility @@ -28,7 +26,7 @@ restResources { } } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java index 2b4fea0327e86..203105edc5a24 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AggregationExecutionContext; import org.elasticsearch.search.aggregations.Aggregator; @@ -177,65 +178,66 @@ public void collect(int doc, long bucket) throws IOException { } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { // Buckets are ordered into groups - [keyed filters] [key1&key2 intersects] - int maxOrd = owningBucketOrds.length * totalNumKeys; - int totalBucketsToBuild = 0; - for (int ord = 0; ord < maxOrd; ord++) { + long maxOrd = owningBucketOrds.size() * totalNumKeys; + long totalBucketsToBuild = 0; + for (long ord = 0; ord < maxOrd; ord++) { if (bucketDocCount(ord) > 0) { totalBucketsToBuild++; } } - long[] bucketOrdsToBuild = new long[totalBucketsToBuild]; - int builtBucketIndex = 0; - for (int ord = 0; ord < maxOrd; ord++) { - if (bucketDocCount(ord) > 0) { - bucketOrdsToBuild[builtBucketIndex++] = ord; - } - } - assert builtBucketIndex == totalBucketsToBuild; - builtBucketIndex = 0; - var bucketSubAggs = buildSubAggsForBuckets(bucketOrdsToBuild); - InternalAggregation[] results = new InternalAggregation[owningBucketOrds.length]; - for (int owningBucketOrdIdx = 0; owningBucketOrdIdx < owningBucketOrds.length; owningBucketOrdIdx++) { - List buckets = new ArrayList<>(filters.length); - for (int i = 0; i < keys.length; i++) { - long bucketOrd = bucketOrd(owningBucketOrds[owningBucketOrdIdx], i); - long docCount = bucketDocCount(bucketOrd); - // Empty buckets are not returned because this aggregation will commonly be used under a - // a date-histogram where we will look for transactions over time and can expect many - // empty buckets. 
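The ordinal scheme noted at the top of buildAggregations ("[keyed filters] [key1&key2 intersects]") gives every owning bucket a contiguous stride of totalNumKeys ordinals: the k single-key filters first, then the k*(k-1)/2 pairwise intersections in (i, j) order with i < j. A sketch of that layout (illustrative names, matching the maxOrd arithmetic above; the aggregator's real bucketOrd helper is not shown in this hunk):

static int totalNumKeys(int k) {
    return k + (k * (k - 1)) / 2; // singles plus unordered pairs
}

static long bucketOrd(long owningBucketOrd, int position, int totalNumKeys) {
    return owningBucketOrd * totalNumKeys + position; // one stride per owning bucket
}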
- if (docCount > 0) { - InternalAdjacencyMatrix.InternalBucket bucket = new InternalAdjacencyMatrix.InternalBucket( - keys[i], - docCount, - bucketSubAggs.apply(builtBucketIndex++) - ); - buckets.add(bucket); + try (LongArray bucketOrdsToBuild = bigArrays().newLongArray(totalBucketsToBuild)) { + int[] builtBucketIndex = new int[] { 0 }; + for (int ord = 0; ord < maxOrd; ord++) { + if (bucketDocCount(ord) > 0) { + bucketOrdsToBuild.set(builtBucketIndex[0]++, ord); } } - int pos = keys.length; - for (int i = 0; i < keys.length; i++) { - for (int j = i + 1; j < keys.length; j++) { - long bucketOrd = bucketOrd(owningBucketOrds[owningBucketOrdIdx], pos); + assert builtBucketIndex[0] == totalBucketsToBuild; + builtBucketIndex[0] = 0; + var bucketSubAggs = buildSubAggsForBuckets(bucketOrdsToBuild); + InternalAggregation[] aggregations = buildAggregations(Math.toIntExact(owningBucketOrds.size()), owningBucketOrdIdx -> { + List<InternalAdjacencyMatrix.InternalBucket> buckets = new ArrayList<>(filters.length); + for (int i = 0; i < keys.length; i++) { + long bucketOrd = bucketOrd(owningBucketOrds.get(owningBucketOrdIdx), i); long docCount = bucketDocCount(bucketOrd); - // Empty buckets are not returned due to potential for very sparse matrices + // Empty buckets are not returned because this aggregation will commonly be used under + // a date-histogram where we will look for transactions over time and can expect many + // empty buckets. if (docCount > 0) { - String intersectKey = keys[i] + separator + keys[j]; + checkRealMemoryCBForInternalBucket(); InternalAdjacencyMatrix.InternalBucket bucket = new InternalAdjacencyMatrix.InternalBucket( - intersectKey, + keys[i], docCount, - bucketSubAggs.apply(builtBucketIndex++) + bucketSubAggs.apply(builtBucketIndex[0]++) ); buckets.add(bucket); } - pos++; } - } - results[owningBucketOrdIdx] = new InternalAdjacencyMatrix(name, buckets, metadata()); + int pos = keys.length; + for (int i = 0; i < keys.length; i++) { + for (int j = i + 1; j < keys.length; j++) { + long bucketOrd = bucketOrd(owningBucketOrds.get(owningBucketOrdIdx), pos); + long docCount = bucketDocCount(bucketOrd); + // Empty buckets are not returned due to potential for very sparse matrices + if (docCount > 0) { + String intersectKey = keys[i] + separator + keys[j]; + InternalAdjacencyMatrix.InternalBucket bucket = new InternalAdjacencyMatrix.InternalBucket( + intersectKey, + docCount, + bucketSubAggs.apply(builtBucketIndex[0]++) + ); + buckets.add(bucket); + } + pos++; + } + } + return new InternalAdjacencyMatrix(name, buckets, metadata()); + }); + assert builtBucketIndex[0] == totalBucketsToBuild; + return aggregations; } - assert builtBucketIndex == totalBucketsToBuild; - return results; } @Override diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java index 6f36f1f17bf8b..999f790ee8117 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java @@ -33,7 +33,7 @@ public class InternalAdjacencyMatrix extends InternalMultiBucketAggregation<InternalAdjacencyMatrix, InternalAdjacencyMatrix.InternalBucket> implements AdjacencyMatrix { - public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket implements AdjacencyMatrix.Bucket { + public static class InternalBucket extends
InternalMultiBucketAggregation.InternalBucketWritable implements AdjacencyMatrix.Bucket { private final String key; private final long docCount; @@ -81,14 +81,12 @@ public InternalAggregations getAggregations() { return aggregations; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + private void bucketToXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(CommonFields.KEY.getPreferredName(), key); builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount); aggregations.toXContentInternal(builder, params); builder.endObject(); - return builder; } @Override @@ -237,7 +235,7 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) { public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.startArray(CommonFields.BUCKETS.getPreferredName()); for (InternalBucket bucket : buckets) { - bucket.toXContent(builder, params); + bucket.bucketToXContent(builder, params); } builder.endArray(); return builder; diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregator.java index d4e1c2928c441..6add1b0ac4a13 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -141,7 +141,7 @@ public final LeafBucketCollector getLeafCollector(AggregationExecutionContext ag protected final InternalAggregation[] buildAggregations( LongKeyedBucketOrds bucketOrds, LongToIntFunction roundingIndexFor, - long[] owningBucketOrds + LongArray owningBucketOrds ) throws IOException { return buildAggregationsForVariableBuckets( owningBucketOrds, @@ -324,7 +324,7 @@ private void increaseRoundingIfNeeded(long rounded) { } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { return buildAggregations(bucketOrds, l -> roundingIdx, owningBucketOrds); } @@ -594,7 +594,7 @@ private void rebucket() { } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { /* * Rebucket before building the aggregation to build as small a result * as possible. 
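These signature changes follow a migration that recurs throughout this diff: owning bucket ordinals arrive as a BigArrays-backed LongArray instead of a long[], so code sizes with size(), loops over long indices, and reads via get(). A minimal sketch of the new shape (hypothetical aggregator; buildForOrd stands in for the real per-ordinal logic):

import java.io.IOException;

import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.search.aggregations.InternalAggregation;

abstract class LongArrayShapeSketch {
    InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException {
        InternalAggregation[] results = new InternalAggregation[Math.toIntExact(owningBucketOrds.size())];
        for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) {
            results[(int) ordIdx] = buildForOrd(owningBucketOrds.get(ordIdx)); // get() replaces array indexing
        }
        return results;
    }

    // hypothetical per-ordinal builder standing in for the real aggregator logic
    abstract InternalAggregation buildForOrd(long owningBucketOrd) throws IOException;
}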
diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java index 42aa79f990fc6..edb7ec4cffce7 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -99,8 +99,7 @@ public Object getKey() { return Instant.ofEpochMilli(key).atZone(ZoneOffset.UTC); } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + private void bucketToXContent(XContentBuilder builder, Params params, DocValueFormat format) throws IOException { String keyAsString = format.format(key).toString(); builder.startObject(); if (format != DocValueFormat.RAW) { @@ -110,7 +109,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount); aggregations.toXContentInternal(builder, params); builder.endObject(); - return builder; } @Override @@ -597,7 +595,7 @@ private BucketReduceResult mergeConsecutiveBuckets( public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.startArray(CommonFields.BUCKETS.getPreferredName()); for (Bucket bucket : buckets) { - bucket.toXContent(builder, params); + bucket.bucketToXContent(builder, params, format); } builder.endArray(); builder.field("interval", getInterval().toString()); diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java index c4cdacd135cb4..c4669b1c25224 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java @@ -34,26 +34,23 @@ public class InternalTimeSeries extends InternalMultiBucketAggregation buckets = new ArrayList<>(size); for (int i = 0; i < size; i++) { - buckets.add(new InternalTimeSeries.InternalBucket(in, keyed)); + buckets.add(new InternalTimeSeries.InternalBucket(in)); } this.buckets = buckets; this.bucketMap = null; @@ -162,7 +156,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th builder.startArray(CommonFields.BUCKETS.getPreferredName()); } for (InternalBucket bucket : buckets) { - bucket.toXContent(builder, params); + bucket.bucketToXContent(builder, params, keyed); } if (keyed) { builder.endObject(); @@ -252,14 +246,14 @@ public InternalTimeSeries create(List buckets) { @Override public InternalBucket createBucket(InternalAggregations aggregations, InternalBucket prototype) { - return new InternalBucket(prototype.key, prototype.docCount, aggregations, prototype.keyed); + return new InternalBucket(prototype.key, prototype.docCount, aggregations); } private InternalBucket reduceBucket(List buckets, AggregationReduceContext context) { InternalTimeSeries.InternalBucket reduced = null; for (InternalTimeSeries.InternalBucket bucket : buckets) { if (reduced == null) { - reduced = new InternalTimeSeries.InternalBucket(bucket.key, bucket.docCount, bucket.aggregations, bucket.keyed); + reduced = new 
InternalTimeSeries.InternalBucket(bucket.key, bucket.docCount, bucket.aggregations); } else { reduced.docCount += bucket.docCount; } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java index c74637330dd7a..63472bca1d9ac 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java @@ -11,6 +11,8 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.mapper.RoutingPathFields; @@ -30,6 +32,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Comparator; import java.util.List; import java.util.Map; @@ -67,42 +70,39 @@ public TimeSeriesAggregator( } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { BytesRef spare = new BytesRef(); - InternalTimeSeries.InternalBucket[][] allBucketsPerOrd = new InternalTimeSeries.InternalBucket[owningBucketOrds.length][]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); - List buckets = new ArrayList<>(); - while (ordsEnum.next()) { - long docCount = bucketDocCount(ordsEnum.ord()); - ordsEnum.readValue(spare); - InternalTimeSeries.InternalBucket bucket = new InternalTimeSeries.InternalBucket( - BytesRef.deepCopyOf(spare), // Closing bucketOrds will corrupt the bytes ref, so need to make a deep copy here. - docCount, - null, - keyed - ); - bucket.bucketOrd = ordsEnum.ord(); - buckets.add(bucket); - if (buckets.size() >= size) { - break; + try (ObjectArray allBucketsPerOrd = bigArrays().newObjectArray(owningBucketOrds.size())) { + for (long ordIdx = 0; ordIdx < allBucketsPerOrd.size(); ordIdx++) { + BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds.get(ordIdx)); + List buckets = new ArrayList<>(); + while (ordsEnum.next()) { + long docCount = bucketDocCount(ordsEnum.ord()); + ordsEnum.readValue(spare); + checkRealMemoryCBForInternalBucket(); + InternalTimeSeries.InternalBucket bucket = new InternalTimeSeries.InternalBucket( + BytesRef.deepCopyOf(spare), // Closing bucketOrds will corrupt the bytes ref, so need to make a deep copy here. + docCount, + null + ); + bucket.bucketOrd = ordsEnum.ord(); + buckets.add(bucket); + if (buckets.size() >= size) { + break; + } } + // NOTE: after introducing _tsid hashing time series are sorted by (_tsid hash, @timestamp) instead of (_tsid, timestamp). + // _tsid hash and _tsid might sort differently, and out of order data might result in incorrect buckets due to _tsid value + // changes not matching _tsid hash changes. Changes in _tsid hash are handled creating a new bucket as a result of making + // the assumption that sorting data results in new buckets whenever there is a change in _tsid hash. 
This is no longer true + because we collect data sorted on (_tsid hash, timestamp) but build aggregation results sorted by (_tsid, timestamp). + buckets.sort(Comparator.comparing(bucket -> bucket.key)); + allBucketsPerOrd.set(ordIdx, buckets.toArray(new InternalTimeSeries.InternalBucket[0])); } - // NOTE: after introducing _tsid hashing time series are sorted by (_tsid hash, @timestamp) instead of (_tsid, timestamp). - // _tsid hash and _tsid might sort differently, and out of order data might result in incorrect buckets due to _tsid value - // changes not matching _tsid hash changes. Changes in _tsid hash are handled creating a new bucket as a result of making - // the assumption that sorting data results in new buckets whenever there is a change in _tsid hash. This is no true anymore - // because we collect data sorted on (_tsid hash, timestamp) but build aggregation results sorted by (_tsid, timestamp). - buckets.sort(Comparator.comparing(bucket -> bucket.key)); - allBucketsPerOrd[ordIdx] = buckets.toArray(new InternalTimeSeries.InternalBucket[0]); - } - buildSubAggsForAllBuckets(allBucketsPerOrd, b -> b.bucketOrd, (b, a) -> b.aggregations = a); + buildSubAggsForAllBuckets(allBucketsPerOrd, b -> b.bucketOrd, (b, a) -> b.aggregations = a); - InternalAggregation[] result = new InternalAggregation[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - result[ordIdx] = buildResult(allBucketsPerOrd[ordIdx]); + return buildAggregations(Math.toIntExact(allBucketsPerOrd.size()), ordIdx -> buildResult(allBucketsPerOrd.get(ordIdx))); } - return result; } @Override @@ -185,7 +185,7 @@ public void collect(int doc, long bucket) throws IOException { } InternalTimeSeries buildResult(InternalTimeSeries.InternalBucket[] topBuckets) { - return new InternalTimeSeries(name, List.of(topBuckets), keyed, metadata()); + return new InternalTimeSeries(name, Arrays.asList(topBuckets), keyed, metadata()); } @FunctionalInterface diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java index e61c02e0b9cd2..3b67d09c0d6a1 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java @@ -49,7 +49,7 @@ private List randomBuckets(boolean keyed, InternalAggregations a } try { var key = TimeSeriesIdFieldMapper.buildLegacyTsid(routingPathFields).toBytesRef(); - bucketList.add(new InternalBucket(key, docCount, aggregations, keyed)); + bucketList.add(new InternalBucket(key, docCount, aggregations)); } catch (IOException e) { throw new UncheckedIOException(e); } @@ -108,10 +108,10 @@ public void testReduceSimple() { InternalTimeSeries first = new InternalTimeSeries( "ts", List.of( - new InternalBucket(new BytesRef("1"), 3, InternalAggregations.EMPTY, false), - new InternalBucket(new BytesRef("10"), 6, InternalAggregations.EMPTY, false), - new InternalBucket(new BytesRef("2"), 2, InternalAggregations.EMPTY, false), - new InternalBucket(new BytesRef("9"), 5, InternalAggregations.EMPTY, false) + new InternalBucket(new BytesRef("1"), 3, InternalAggregations.EMPTY), + new InternalBucket(new BytesRef("10"), 6, InternalAggregations.EMPTY), + new InternalBucket(new BytesRef("2"), 2, InternalAggregations.EMPTY), + new
InternalBucket(new BytesRef("9"), 5, InternalAggregations.EMPTY) ), false, Map.of() @@ -119,8 +119,8 @@ public void testReduceSimple() { InternalTimeSeries second = new InternalTimeSeries( "ts", List.of( - new InternalBucket(new BytesRef("2"), 1, InternalAggregations.EMPTY, false), - new InternalBucket(new BytesRef("3"), 3, InternalAggregations.EMPTY, false) + new InternalBucket(new BytesRef("2"), 1, InternalAggregations.EMPTY), + new InternalBucket(new BytesRef("3"), 3, InternalAggregations.EMPTY) ), false, Map.of() @@ -128,9 +128,9 @@ public void testReduceSimple() { InternalTimeSeries third = new InternalTimeSeries( "ts", List.of( - new InternalBucket(new BytesRef("1"), 2, InternalAggregations.EMPTY, false), - new InternalBucket(new BytesRef("3"), 4, InternalAggregations.EMPTY, false), - new InternalBucket(new BytesRef("9"), 4, InternalAggregations.EMPTY, false) + new InternalBucket(new BytesRef("1"), 2, InternalAggregations.EMPTY), + new InternalBucket(new BytesRef("3"), 4, InternalAggregations.EMPTY), + new InternalBucket(new BytesRef("9"), 4, InternalAggregations.EMPTY) ), false, Map.of() diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java index d9a4023457126..493b4bdc81860 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java @@ -176,19 +176,19 @@ public void testMultiBucketAggregationAsSubAggregation() throws IOException { InternalDateHistogram byTimeStampBucket = ts.getBucketByKey("{dim1=aaa, dim2=xxx}").getAggregations().get("by_timestamp"); assertThat( byTimeStampBucket.getBuckets(), - contains(new InternalDateHistogram.Bucket(startTime, 2, false, null, InternalAggregations.EMPTY)) + contains(new InternalDateHistogram.Bucket(startTime, 2, null, InternalAggregations.EMPTY)) ); assertThat(ts.getBucketByKey("{dim1=aaa, dim2=yyy}").docCount, equalTo(2L)); byTimeStampBucket = ts.getBucketByKey("{dim1=aaa, dim2=yyy}").getAggregations().get("by_timestamp"); assertThat( byTimeStampBucket.getBuckets(), - contains(new InternalDateHistogram.Bucket(startTime, 2, false, null, InternalAggregations.EMPTY)) + contains(new InternalDateHistogram.Bucket(startTime, 2, null, InternalAggregations.EMPTY)) ); assertThat(ts.getBucketByKey("{dim1=bbb, dim2=zzz}").docCount, equalTo(4L)); byTimeStampBucket = ts.getBucketByKey("{dim1=bbb, dim2=zzz}").getAggregations().get("by_timestamp"); assertThat( byTimeStampBucket.getBuckets(), - contains(new InternalDateHistogram.Bucket(startTime, 4, false, null, InternalAggregations.EMPTY)) + contains(new InternalDateHistogram.Bucket(startTime, 4, null, InternalAggregations.EMPTY)) ); }; diff --git a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/random_sampler.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/random_sampler.yml index 5b2c2dc379cb9..4d8efe2a6f9d8 100644 --- a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/random_sampler.yml +++ b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/random_sampler.yml @@ -142,6 +142,66 @@ setup: } - match: { aggregations.sampled.mean.value: 1.0 } --- +"Test random_sampler aggregation with scored subagg": + - 
requires: + capabilities: + - method: POST + path: /_search + capabilities: [ random_sampler_with_scored_subaggs ] + test_runner_features: capabilities + reason: "Support for random sampler with scored subaggs capability required" + - do: + search: + index: data + size: 0 + body: > + { + "query": { + "function_score": { + "random_score": {} + } + }, + "aggs": { + "sampled": { + "random_sampler": { + "probability": 0.5 + }, + "aggs": { + "top": { + "top_hits": {} + } + } + } + } + } + - is_true: aggregations.sampled.top.hits + - do: + search: + index: data + size: 0 + body: > + { + "query": { + "function_score": { + "random_score": {} + } + }, + "aggs": { + "sampled": { + "random_sampler": { + "probability": 1.0 + }, + "aggs": { + "top": { + "top_hits": {} + } + } + } + } + } + - match: { aggregations.sampled.top.hits.total.value: 6 } + - is_true: aggregations.sampled.top.hits.hits.0._score +--- "Test random_sampler aggregation with poor settings": - requires: cluster_features: ["gte_v8.2.0"] diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java index b2b7f86ce34e6..e091f0175009e 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java @@ -28,6 +28,8 @@ */ public class HyphenationCompoundWordTokenFilterFactory extends AbstractCompoundWordTokenFilterFactory { + private final boolean noSubMatches; + private final boolean noOverlappingMatches; private final HyphenationTree hyphenationTree; HyphenationCompoundWordTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { @@ -46,6 +48,9 @@ public class HyphenationCompoundWordTokenFilterFactory extends AbstractCompoundW } catch (Exception e) { throw new IllegalArgumentException("Exception while reading hyphenation_patterns_path.", e); } + + noSubMatches = settings.getAsBoolean("no_sub_matches", false); + noOverlappingMatches = settings.getAsBoolean("no_overlapping_matches", false); } @Override @@ -57,7 +62,9 @@ public TokenStream create(TokenStream tokenStream) { minWordSize, minSubwordSize, maxSubwordSize, - onlyLongestMatch + onlyLongestMatch, + noSubMatches, + noOverlappingMatches ); } } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CompoundAnalysisTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CompoundAnalysisTests.java index ad98c2f8ffe1e..69dd8e91b52b2 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CompoundAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CompoundAnalysisTests.java @@ -31,6 +31,9 @@ import org.hamcrest.MatcherAssert; import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -42,6 +45,7 @@ import static org.hamcrest.Matchers.instanceOf; public class CompoundAnalysisTests extends ESTestCase { + public void testDefaultsCompoundAnalysis() throws Exception { Settings settings = getJsonSettings(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings); @@ -63,6 +67,44 @@ public 
void testDictionaryDecompounder() throws Exception { assertWarnings("Setting [version] on analysis component [custom7] has no effect and is deprecated"); } + public void testHyphenationDecompoundingAnalyzerOnlyLongestMatch() throws Exception { + Settings[] settingsArr = new Settings[] { getJsonSettings(), getYamlSettings() }; + for (Settings settings : settingsArr) { + List<String> terms = analyze(settings, "hyphenationDecompoundingAnalyzerOnlyLongestMatch", "kaffeemaschine fussballpumpe"); + MatcherAssert.assertThat( + terms, + hasItems("kaffeemaschine", "kaffee", "fee", "maschine", "fussballpumpe", "fussball", "ballpumpe", "pumpe") + ); + } + assertWarnings("Setting [version] on analysis component [custom7] has no effect and is deprecated"); + } + + /** + * For example, given a word list of ["kaffee", "fee", "maschine"], + * no_sub_matches should prevent "fee" from being emitted as a token in "kaffeemaschine". + */ + public void testHyphenationDecompoundingAnalyzerNoSubMatches() throws Exception { + Settings[] settingsArr = new Settings[] { getJsonSettings(), getYamlSettings() }; + for (Settings settings : settingsArr) { + List<String> terms = analyze(settings, "hyphenationDecompoundingAnalyzerNoSubMatches", "kaffeemaschine fussballpumpe"); + MatcherAssert.assertThat(terms, hasItems("kaffeemaschine", "kaffee", "maschine", "fussballpumpe", "fussball", "ballpumpe")); + } + assertWarnings("Setting [version] on analysis component [custom7] has no effect and is deprecated"); + } + + /** + * For example, given a word list of ["fuss", "fussball", "ballpumpe", "ball", "pumpe"], + * no_overlapping_matches should prevent "ballpumpe" from being emitted as a token in "fussballpumpe". + */ + public void testHyphenationDecompoundingAnalyzerNoOverlappingMatches() throws Exception { + Settings[] settingsArr = new Settings[] { getJsonSettings(), getYamlSettings() }; + for (Settings settings : settingsArr) { + List<String> terms = analyze(settings, "hyphenationDecompoundingAnalyzerNoOverlappingMatches", "kaffeemaschine fussballpumpe"); + MatcherAssert.assertThat(terms, hasItems("kaffeemaschine", "kaffee", "maschine", "fussballpumpe", "fussball", "pumpe")); + } + assertWarnings("Setting [version] on analysis component [custom7] has no effect and is deprecated"); + } + private List<String> analyze(Settings settings, String analyzerName, String text) throws IOException { IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings); AnalysisModule analysisModule = createAnalysisModule(settings); @@ -92,20 +134,25 @@ public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() { } private Settings getJsonSettings() throws IOException { - String json = "/org/elasticsearch/analysis/common/test1.json"; - return Settings.builder() - .loadFromStream(json, getClass().getResourceAsStream(json), false) - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .build(); + return getSettings("/org/elasticsearch/analysis/common/test1.json"); } private Settings getYamlSettings() throws IOException { - String yaml = "/org/elasticsearch/analysis/common/test1.yml"; + return getSettings("/org/elasticsearch/analysis/common/test1.yml"); + } + + private Settings getSettings(String filePath) throws IOException { + String hypenationRulesFileName = "de_DR.xml"; + InputStream hypenationRules = getClass().getResourceAsStream(hypenationRulesFileName); + Path home = createTempDir(); + Path config = home.resolve("config"); + Files.createDirectory(config); + Files.copy(hypenationRules, 
config.resolve(hypenationRulesFileName)); + return Settings.builder() - .loadFromStream(yaml, getClass().getResourceAsStream(yaml), false) + .loadFromStream(filePath, getClass().getResourceAsStream(filePath), false) .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put(Environment.PATH_HOME_SETTING.getKey(), home.toString()) .build(); } } diff --git a/modules/analysis-common/src/test/resources/org/elasticsearch/analysis/common/de_DR.xml b/modules/analysis-common/src/test/resources/org/elasticsearch/analysis/common/de_DR.xml new file mode 100644 index 0000000000000..37bcde1246a81 --- /dev/null +++ b/modules/analysis-common/src/test/resources/org/elasticsearch/analysis/common/de_DR.xml @@ -0,0 +1,1130 @@ + + + + + + + + + + + + aA + bB + cC + dD + eE + fF + gG + hH + iI + jJ + kK + lL + mM + nN + oO + pP + qQ + rR + sS + tT + uU + vV + wW + xX + yY + zZ + �� + �� + �� + �� + � + + + + .aa6l .ab3a4s .ab3ei .abi2 .ab3it .ab1l .ab1r .ab3u .ad3o4r .alti6 + .ana3c .an5alg .an1e + .ang8s2t1 + .an1s .ap1p .ar6sc .ar6ta .ar6tei .as2z + .au2f1 .au2s3 .be5erb .be3na .ber6t5r .bie6r5 .bim6s5t .brot3 .bru6s + .ch6 .che6f5 .da8c .da2r .dar5in .dar5u .den6ka .de5r6en .des6pe + .de8spo .de3sz .dia3s4 .dien4 .dy2s1 .ehren5 .eine6 .ei6n5eh .ei8nen + .ein5sa .en6der .en6d5r .en3k4 .en8ta8 .en8tei .en4t3r .epo1 .er6ban + .er6b5ei .er6bla .er6d5um .er3ei .er5er .er3in .er3o4b .erwi5s .es1p + .es8t1l .es8t1n + .ex1a2 .ex3em .fal6sc .fe6st5a .flu4g3 .furch8 .ga6ner .ge3n4a + .ge5r� + .ges6 + .halb5 .halbe6 .hal6br .haup4 .hau4t .heima6 .he4r3e + .her6za .he5x .hin3 .hir8sc .ho4c .hu3sa .hy5o .ibe5 .ima6ge .in1 + .ini6 .is5chi .jagd5 .kal6k5o .ka6ph .ki4e .kop6f3 .kraf6 .k�5ra + .lab6br .liie6 .lo6s5k .l�4s3t .ma5d .mi2t1 .no6th .no6top + .obe8ri .ob1l .obs2 .ob6st5e .or3c .ort6s5e .ost3a .oste8r .pe4re + .pe3ts .ph6 .po8str .rau4m3 .re5an .ro8q .ru5the .r�5be + + .sch8 .se6e .se5n6h .se5ra .si2e .spi6ke .st4 .sy2n + .tages5 .tan6kl .ta8th .te6e .te8str .to6der .to8nin .to6we .um1 + .umpf4 .un1 .une6 .unge5n .ur1c .ur5en .ve6rin .vora8 .wah6l5 .we8ges + .we8s2t .wes3te + .wo6r .wor3a .wun4s .zi4e .zuch8 .�nde8re .�ch8 aa1c aa2gr + aal5e aa6r5a a5arti aa2s1t aat2s 6aba ab3art 1abdr 6abel aben6dr + ab5erk ab5err ab5esse 1abf 1abg 1abh� ab1ir 1abko a1bl ab1la + 5ablag a6bla� ab4ler ab1lu a8bl� 5a6bl� abma5c + 1abn ab1ra ab1re 5a6brec ab1ro + ab1s + ab8sk abs2z 3abtei ab1ur 1abw + 5abze 5abzu ab1�n ab�u8 a4ce. a5chal ach5art ach5au a1che + a8chent ach6er. a6ch5erf a1chi ach1l ach3m ach5n a1cho ach3re a1chu + ach1w a1chy ach5�f ack1o acks6t ack5sta a1d 8ad. a6d5ac ad3ant + ad8ar 5addi a8dein ade5o8 adi5en 1adj 1adle ad1op a2dre 3adres adt1 + 1adv a6d� a1e2d ae1r a1er. 1aero 8afa a3fal af1an a5far a5fat + af1au a6fentl a2f1ex af1fr af5rau af1re 1afri af6tent af6tra aft5re + a6f5um 8af� ag5abe 5a4gent ag8er ages5e 1aggr ag5las ag1lo a1gn + ag2ne 1agog a6g5und a1ha a1he ah5ein a4h3erh a1hi ahl1a ah1le ah4m3ar + ahn1a a5ho ahra6 ahr5ab ah1re ah8rei ahren8s ahre4s3 ahr8ti ah1ru a1hu + ah8� ai3d2s ai1e aif6 a3inse ai4re. a5isch. ais8e a3ismu ais6n + aiso6 a1j 1akad a4kade a1ke a1ki 1akko 5akro1 a5lal al5ans 3al8arm + al8beb al8berw alb5la 3album al1c a1le a6l5e6be a4l3ein a8lel a8lerb + a8lerh a6lert 5a6l5eth 1algi al4gli al3int al4lab al8lan al4l3ar + alle3g a1lo a4l5ob al6schm al4the + + al4t3re 8a1lu alu5i a6lur + alu3ta a1l� a6mate 8ame. 
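The behavior exercised by the compound-analysis tests above is driven by the two settings read in HyphenationCompoundWordTokenFilterFactory. A hedged sketch of defining such a filter programmatically (the filter name "my_decompounder" is illustrative; type, hyphenation_patterns_path and word_list are pre-existing options, while no_sub_matches and no_overlapping_matches are the flags added in this diff):

import org.elasticsearch.common.settings.Settings;

Settings filterSettings = Settings.builder()
    .put("index.analysis.filter.my_decompounder.type", "hyphenation_decompounder")
    .put("index.analysis.filter.my_decompounder.hyphenation_patterns_path", "de_DR.xml")
    .putList("index.analysis.filter.my_decompounder.word_list", "kaffee", "fee", "maschine")
    .put("index.analysis.filter.my_decompounder.no_sub_matches", true)
    .put("index.analysis.filter.my_decompounder.no_overlapping_matches", false)
    .build();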
5a6meise am6m5ei am6mum am2n ampf3a + am6schw am2ta a1mu a1m� a3nac a1nad anadi5e an3ako an3alp 3analy + an3ame an3ara a1nas an5asti a1nat anat5s an8dent ande4s3 an1ec an5eis + an1e2k 4aner. a6n5erd a8nerf a6n5erke 1anfa 5anfert 1anf� 3angab + 5angebo an3gli ang6lis an2gn 3angri ang5t6 5anh� ani5g ani4ka + an5i8on an1kl an6kno an4kro 1anl anma5c anmar4 3annah anne4s3 a1no + 5a6n1o2d 5a6n3oma 5a6nord 1anr an1sa 5anschl an4soz an1st 5anstal + an1s2z 5antenn an1th 5anw� a5ny an4z3ed 5anzeig 5anzieh 3anzug + an1� 5an�s a1n� an�8d a1os a1pa 3apfel a2ph1t + aph5�6 a1pi 8apl apo1c apo1s + a6pos2t + a6poth 1appa ap1pr a1pr + a5p� a3p� a1ra a4r3af ar3all 3arbei 2arbt ar1c 2a1re ar3ein + ar2gl 2a1ri ari5es ar8kers ar6les ar4nan ar5o6ch ar1o2d a1rol ar3ony + a8ror a3ros ar5ox ar6schl 8artei ar6t5ri a1ru a1ry 1arzt arz1w + ar8z� ar�8m ar�6 ar5�m ar1�2 a1sa a6schec + asch5l asch3m a6schn a3s4hi as1pa asp5l + + as5tev 1asth + + a1str ast3re 8a1ta ata5c ata3la a6tapf ata5pl a1te a6teli aten5a + ate5ran 6atf 6atg a1th at3hal 1athl 2a1ti 5atlant 3atlas 8atmus 6atn + a1to a6t5ops ato6ra a6t5ort. 4a1tr a6t5ru at2t1h at5t6h� 6a1tu + atz1w a1t� a1t� au1a au6bre auch3a au1e aue4l 5aufent + 3auff� 3aufga 1aufn auf1t 3auftr 1aufw 3auge. au4kle aule8s 6aum + au8mar aum5p 1ausb 3ausd 1ausf 1ausg au8sin + + au4sta 1ausw 1ausz + aut5eng au1th 1auto au�e8 a1v ave5r6a aver6i a1w a6wes a1x + a2xia a6xio a1ya a1z azi5er. 8a� 1ba 8ba8del ba1la ba1na + ban6k5r ba5ot bardi6n ba1ro basten6 bau3sp 2b1b bb6le b2bli 2b1c 2b1d + 1be be1a be8at. be1ch 8becht 8becke. be5el be1en bee8rei be5eta bef2 + 8beff be1g2 beh�8 bei1s 6b5eisen bei3tr b8el bel8o belu3t be3nac + bend6o be6ners be6nerw be4nor ben4se6 bens5el be1n� be1n� + be1o2 b8er. be1ra be8rac ber8gab. ber1r be1r� bes8c bes5erh + bes2p be5tha bet5sc be1un be1ur 8bex be6zwec 2b1f8 + + 2b1g2 + bga2s5 bge1 2b1h bhole6 1bi bi1bl b6ie bi1el bi1la bil�5 bi1na + bi4nok + + bi6stu bi5tr bit4t5r b1j 2b1k2 bk�6 bl8 b6la. + 6b1lad 6blag 8blam 1blat b8latt 3blau. b6lav 3ble. b1leb b1led + 8b1leg 8b1leh 8bleid 8bleih 6b3lein + + ble4m3o 4blich b4lind + 8bling b2lio 5blit b4litz b1loh 8b1los 1blu 5blum 2blun blut3a blut5sc + 3bl� bl�s5c 5bl� 3bl� bl�8sc 2b1m 2b1n 1bo + bo1ch bo5d6s boe5 8boff 8bonk bo1ra b1ort 2b1p2 b1q 1br brail6 brast8 + bre4a b5red 8bref 8b5riem b6riga bro1s b1rup b2ruz 8br�h + br�s5c 8bs b1sa b8sang b2s1ar b1sc bs3erl bs3erz b8sof b1s2p + bst1h b3stru b5st� b6sun 2b1t b2t1h 1bu bu1ie bul6k b8ure bu6sin + 6b1v 2b1w 1by1 by6te. 8b1z + + 1b� b5�6s5 1b� + b6�5bere b�ge6 b�gel5e b�r6sc 1ca cag6 ca5la ca6re + ca5y c1c 1ce celi4c celich5 ce1ro c8h 2ch. 1chae ch1ah ch3akt cha6mer + 8chanz 5chara 3chari 5chato 6chb 1chef 6chei ch3eil ch3eis 6cherkl + 6chf 4chh 5chiad 5chias 6chins 8chj chl6 5chlor 6ch2m 2chn6 ch8nie + 5cho. 8chob choi8d 6chp ch3ren ch6res ch3r� 2chs 2cht cht5ha + cht3hi 5chthon ch6tin 6chuh chu4la 6ch3unt chut6t 8chw 1ci ci5tr c2k + 2ck. ck1ei 4ckh ck3l ck3n ck5o8f ck1r 2cks ck5stra ck6s5u c2l 1c8o + con6ne 8corb cos6t c3q 1c6r 8c1t 1cu 1cy 5c�1 c�5 1da. + 8daas 2dabg 8dabr 6dabt 6dabw 1dac da2gr 6d5alk 8d5amt dan6ce. + dani5er dan8ker 2danl danla6 6dans 8danzi 6danzu d1ap da2r1a8 2d1arb + d3arc dar6men 4d3art 8darz 1dat 8datm 2d1auf 2d1aus 2d1b 2d1c 2d1d + d5de d3d2h dd�mme8 1de 2deal de5an de3cha de1e defe6 6deff 2d1ehr + 5d4eic de5isc de8lar del6s5e del6spr de4mag de8mun de8nep dene6r + 8denge. 8dengen de5o6d 2deol de5ram 8derdb der5ein de1ro der1r d8ers + der5um de4s3am de4s3an de4sau de6sil de4sin de8sor de4spr de2su 8deul + de5us. 
2d1f df2l 2d1g 2d1h 1di dia5c di5ara dice5 di3chr di5ena di1gn + di1la dil8s di1na 8dind 6dinf 4d3inh 2d1ins di5o6d di3p4t di8sen dis1p + di5s8per di6s5to + dis3tr + di8tan di8tin d1j 6dje 2dju 2d1k 2d1l 2d1m + 2d1n6 dni6 dnje6 1do 6d5obe do6berf 6d5ony do3ran 6dord 2d1org dor4t3h + + 6doth dott8e 2d1p d5q dr4 1drah 8drak d5rand 6dre. 4drech + d6reck 4d3reg 8d3reic d5reife 8drem 8d1ren 2drer 8dres. 6d5rh 1dria + d1ric 8drind droi6 dro5x 1dru 8drut dr�s5c 1dr� dr�5b + dr�8sc 2ds d1sa d6san dsat6 d1sc 5d6scha. 5dschik dse8e d8serg + 8dsl d1sp d4spak ds2po d8sp� d1st d1s� 2dt d1ta d1te d1ti + d1to dt1s6 d1tu d5t� 1du du5als du1b6 du1e duf4t3r 4d3uh du5ie + 8duml 8dumw 2d1und du8ni 6d5unt dur2c durch3 6durl 6dursa 8durt + dus1t + du8schr 2d1v 2d1w dwa8l 2d1z 1d� 6d�h 8d�nd d�6r + d�8bl d5�l d�r6fl d�8sc d5�4st + + 1d� ea4ben e1ac e1ah e1akt e1al. e5alf e1alg e5a8lin e1alk e1all + e5alp e1alt e5alw e1am e1and ea6nim e1ar. e5arf e1ark e5arm e3art + e5at. e6ate e6a5t6l e8ats e5att e6au. e1aus e1b e6b5am ebens5e + eb4lie eb4ser eb4s3in e1che e8cherz e1chi ech3m 8ech3n ech1r ech8send + ech4su e1chu eck5an e5cl e1d ee5a ee3e ee5g e1ei ee5isc eei4s3t + ee6lend e1ell ee5l� e1erd ee3r4e ee8reng eere6s5 ee5r� + ee6tat e1ex e1f e6fau e8fe8b 3effek ef3rom ege6ra eglo6si 1egy e1ha + e6h5ach eh5ans e6hap eh5auf e1he e1hi ehl3a eh1le ehl5ein eh1mu ehn5ec + e1ho ehr1a eh1re ehre6n eh1ri eh1ru ehr5um e1hu eh1w e1hy e1h� + e1h� e3h�t ei1a eia6s ei6bar eich3a eich5r ei4dar ei6d5ei + ei8derf ei3d4sc ei1e 8eifen 3eifri 1eign eil1d ei6mab ei8mag ein1a4 + ei8nat ei8nerh ei8ness ei6nete ein1g e8ini ein1k ei6n5od ei8nok ei4nor + e3ins� ei1o e1irr ei5ru ei8sab ei5schn ei6s5ent ei8sol ei4t3al + eit3ar eit1h ei6thi ei8tho eit8samt ei6t5um e1j 1ekd e1ke e1ki e1k2l + e1kn ekni4 e1la e2l1al 6elan e6lanf e8lanl e6l5ans el3arb el3arm + e6l3art 5e6lasti e6lauge elbst5a e1le 6elef ele6h e6l5ehe e8leif + e6l5einh 1elek e8lel 3eleme e6lemen e6lente el5epi e4l3err e6l5ersc + elf2l elg2 e6l5ins ell8er 4e1lo e4l3ofe el8soh el8tent 5eltern e1lu + elut2 e1l� e1l� em8dei em8meis 4emo emo5s 1emp1f 1empt 1emto + e1mu emurk4 emurks5 e1m� en5a6ben en5achs en5ack e1nad en5af + en5all en3alt en1am en3an. en3ant en3anz en1a6p en1ar en1a6s 6e1nat + en3auf en3aus en2ce enda6l end5erf end5erg en8dess 4ene. en5eck + e8neff e6n5ehr e6n5eim en3eis 6enem. 6enen e4nent 4ener. e8nerd + e6n3erf e4nerg 5energi e6n5erla en5ers e6nerst en5erw 6enes e6n5ess + e2nex en3glo 2eni enni6s5 ennos4 enns8 e1no e6nober eno8f en5opf + e4n3ord en8sers ens8kl en1sp ens6por en5t6ag enta5go en8terbu en6tid + 3entla ent5ric 5entwic 5entwu 1entz enu5i e3ny en8zan en1�f + e1n�s e1n�g eo1c e5o6fe e5okk e1on. e3onf e5onk e5onl e5onr + e5opf e5ops e5or. e1ord e1org eo5r6h eo1t e1pa e8pee e6p5e6g ep5ent + e1p2f e1pi 5epid e6pidem e1pl 5epos e6pos. ep4p3a e1pr e1p� e1q + e1ra. er5aal 8eraba e5rabel er5a6ben e5rabi er3abs er3ach era5e + era5k6l er3all er3amt e3rand e3rane er3ans e5ranz. e1rap er3arc + e3rari er3a6si e1rat erat3s er3auf e3raum 3erbse er1c e1re 4e5re. + er3eck er5egg er5e2h 2erei e3rei. e8reine er5einr 6eren. e4r3enm + 4erer. e6r5erm er5ero er5erst e4r3erz er3ess 5erf�l er8gan. + 5ergebn er2g5h 5erg�nz 5erh�hu 2e1ri eri5ak e6r5iat e4r3ind + e6r5i6n5i6 er5ins e6r5int er5itio er1kl 3erkl� 5erl�s. + ermen6s er6nab 3ernst 6e1ro. e1rod er1o2f e1rog 6e3roi ero8ide e3rol + e1rom e1ron e3rop8 e2r1or e1ros e1rot er5ox ersch4 5erstat er6t5ein + er2t1h er5t6her 2e1ru eruf4s3 e4r3uhr er3ums e5rus 5erwerb e1ry er5zwa + er3zwu er�8m er5�s er�8 e3r�s. 
e6r1�2b e1sa + esa8b e8sap e6s5a6v e1sc esch4l ese1a es5ebe eserve5 e8sh es5ill + es3int es4kop e2sl eso8b e1sp espei6s5 es2po es2pu 5essenz e6stabs + e6staf e6st5ak est3ar e8stob e1str est5res es3ur e2sz e1s� e1ta + et8ag etari5e eta8ta e1te eten6te et5hal e5thel e1ti 1etn e1to e1tr + et3rec e8tscha et8se et6tei et2th et2t1r e1tu etu1s et8zent et8zw + e1t� e1t� e1t� eu1a2 eu1e eue8rei eu5fe euin5 euk2 + e1um. eu6nio e5unter eu1o6 eu5p 3europ eu1sp eu5str eu8zo e1v eval6s + eve5r6en ever4i e1w e2wig ex1or 1exp 1extr ey3er. e1z e1�2 + e5�8 e1� e8�es fa6ch5i fade8 fa6del fa5el. + fal6lo falt8e fa1na fan4gr 6fanl 6fap far6ba far4bl far6r5a 2f1art + fa1sc fau8str fa3y 2f1b2 6f1c 2f1d 1fe 2f1eck fe6dr feh6lei f6eim + 8feins f5eis fel5en 8feltern 8femp fe5rant 4ferd. ferri8 fe8stof + fe6str fe6stum fe8tag fet6ta fex1 2ff f1fa f6f5arm f5fe ffe5in ffe6la + ffe8ler ff1f f1fla ff3lei ff4lie ff8sa ff6s5ta 2f1g2 fgewen6 4f1h 1fi + fid4 fi3ds fieb4 fi1la fi8lei fil4m5a f8in. fi1na 8finf fi8scho fi6u + 6f1j 2f1k2 f8lanz fl8e 4f3lein 8flib 4fling f2lix 6f3lon 5flop 1flor + 5f8l�c 3fl�t 2f1m 2f1n 1fo foh1 f2on fo6na 2f1op fo5ra + for8mei for8str for8th for6t5r fo5ru 6f5otte 2f1p8 f1q fr6 f5ram + 1f8ran f8ra� f8re. frei1 5frei. f3reic f3rest f1rib + 8f1ric 6frig 1fris fro8na fr�s5t 2fs f1sc f2s1er f5str + fs3t�t 2ft f1tak f1te ft5e6h ftere6 ft1h f1ti f5to f1tr ft5rad + ft1sc ft2so f1tu ftwi3d4 ft1z 1fu 6f5ums 6funf fun4ka fu8�end + 6f1v 2f1w 2f1z 1f� f�1c 8f�rm 6f�ug + f�8� f�de3 8f�f 3f�r 1f� + f�n4f3u 1ga ga6bl 6gabw 8gabz g3a4der ga8ho ga5isc 4gak ga1la + 6g5amt ga1na gan5erb gan6g5a ga5nj 6ganl 8gansc 6garb 2g1arc 2g1arm + ga5ro 6g3arti ga8sa ga8sc ga6stre 2g1atm 6g5auf gau5fr g5aus 2g1b g5c + 6gd g1da 1ge ge1a2 ge6an ge8at. ge1e2 ge6es gef2 8geff ge1g2l ge1im + 4g3eise geist5r gel8bra gelt8s ge5l� ge8nin gen3k 6g5entf + ge3n� ge1or ge1ra ge6rab ger8au 8gerh� ger8ins ge1ro 6g5erz. + ge1r� ge1r� ge1s ges2p + ge2s7te. ge2s7ten ge2s7ter ge2s7tik + ge5unt 4g3ex3 2g1f8 2g1g g1ha 6g1hei + 5ghel. g5henn 6g1hi g1ho 1ghr g1h� 1gi gi5la gi8me. gi1na + 4g3ins + gis1tr + g1j 2g1k 8gl. 1glad g5lag glan4z3 1glas 6glass 5glaub + g3lauf 1gle. g5leb 3gleic g3lein 5gleis 1glem 2gler 8g3leu gli8a + g2lie 3glied 1g2lik 1g2lim g6lio 1gloa 5glom 1glon 1glop g1los g4loss + g5luf 1g2ly 1gl� 2g1m gn8 6gn. 1gna 8gnach 2gnah g1nas g8neu + g2nie g3nis 1gno 8gnot 1go goe1 8gof 2gog 5gogr 6g5oh goni5e 6gonist + go1ra 8gord 2g1p2 g1q 1gr4 g5rahm gra8m gra4s3t 6g1rec gre6ge 4g3reic + g5reit 8grenn gri4e g5riem 5grif 2grig g5ring 6groh 2grot gro6� + 4grut 2gs gs1ab g5sah gs1ak gs1an gs8and gs1ar gs1au g1sc + gs1ef g5seil gs5ein g2s1er gs1in g2s1o gso2r gs1pr g2s1u 2g1t g3te + g2t1h 1gu gu5as gu2e 2gue. 6gued 4g3uh 8gums 6g5unt + + gut3h gu2tu + 4g1v 2g1w gy1n g1z 1g� 8g�8m 6g�rm 1g� 1g� + 6g�b 1haa hab8r ha8del hade4n 8hae ha5el. haf6tr 2hal. ha1la + hal4b5a 6hale 8han. ha1na han6dr han6ge. 2hani h5anth 6hanz 6harb + h3arbe h3arme ha5ro ha2t1h h1atm hau6san ha8� h1b2 h1c h1d + he2bl he3cho h3echt he5d6s 5heft h5e6he. hei8ds h1eif 2hein he3ism + he5ist. heit8s3 hek6ta hel8lau 8helt he6mer 1hemm 6h1emp hen5end + hen5klo hen6tri he2nu 8heo he8q her3ab he5rak her3an 4herap her3au + h3erbi he1ro he8ro8b he4r3um her6z5er he4spe he1st heta6 het5am he5th + heu3sc he1xa hey5e h1f2 h1g hgol8 h1h h1iat hie6r5i hi5kt hil1a2 + hil4fr hi5nak hin4ta hi2nu hi5ob hirn5e hir6ner hi1sp hi1th hi5tr + 5hitz h1j h6jo h1k2 hlabb4 hla4ga hla6gr h5lai hl8am h1las h1la� + hl1c h1led h3lein h5ler. 
h2lif h2lim h8linf hl5int h2lip + h2lit h4lor h3lose h1l�s hme5e h2nee h2nei hn3eig h2nel hne8n + hne4p3f hn8erz h6netz h2nip h2nit h1nol hn5sp h2nuc h2nud h2nul hoch1 + 1hoh hoh8lei 2hoi ho4l3ar 1holz h2on ho1ra 6horg 5horn. ho3sl hos1p + ho4spi h1p hpi6 h1q 6hr h1rai h8rank h5raum hr1c hrcre8 h1red h3reg + h8rei. h4r3erb h8rert hrg2 h1ric hr5ins h2rom hr6t5erl hr2t1h hr6t5ra + hr8tri h6rum hr1z hs3ach h6s5amt h1sc h6s5ec h6s5erl hs8erle h4sob + h1sp h8spa� h8spel hs6po h4spun h1str h4s3tum hs3und + h1s� h5ta. h5tab ht3ac ht1ak ht3ang h5tanz ht1ar ht1at h5taub + h1te h2t1ec ht3eff ht3ehe h4t3eif h8teim h4t3ein ht3eis h6temp h8tentf + hte8ren h6terf� h8tergr h4t3erh h6t5ersc h8terst h8tese h8tess + h2t1eu h4t3ex ht1he ht5hu h1ti ht5rak hts3ah ht1sc ht6sex ht8sk ht8so + h1tu htz8 h5t�m hub5l hu6b5r huh1l h5uhr. huld5a6 hu8lent + hu8l� h5up. h1v h5weib h3weis h1z h�8kl h�l8s + h�ma8tu8 h�8sche. h�t1s h�u4s3c 2h�. + 2h�e 8h�i h�6s h�s5c h�hne6 h�l4s3t + h�tte8re i5adn i1af i5ak. i1al. i1al1a i1alb i1ald i5alei i1alf + i1alg i3alh i1alk i1all i1alp i1alr i1als i1alt i1alv i5alw i3alz + i1an. ia5na i3and ian8e ia8ne8b i1ang i3ank i5ann i1ant i1anz i6apo + i1ar. ia6rab i5arr i1as. i1asm i1ass i5ast. i1at. i5ats i1au i5azz + i6b5eig i6b5eis ib2le i4blis i6brig i6b5unt i6b�b i1che ich5ei + i6cherb i1chi ich5ins ich1l ich3m ich1n i1cho icht5an icht3r i1chu + ich1w ick6s5te ic5l i1d id3arm 3ideal ide8na 3ideol ide5r� i6diot + id5rec id1t ie1a ie6b5ar iebe4s3 ie2bl ieb1r ie8bra ie4bre ie8b� + ie2dr ie1e8 ie6f5ad ief5f ie2f1l ie4fro ief1t i1ei ie4l3ec ie8lei + ie4lek i3ell i1en. i1end ien6e i3enf i5enn ien6ne. i1enp i1enr + i5ensa ien8stal i5env i1enz ie5o ier3a4b ie4rap i2ere ie4rec ie6r5ein + ie6r5eis ier8er i3ern. ie8rum ie8rund ie6s5che ie6tau ie8tert ie5the + ie6t5ri i1ett ie5un iex5 2if i1fa if5ang i6fau if1fr if5lac i5f6lie + i1fre ift5a if6t5r ig3art 2ige i8gess ig5he i5gla ig2ni i5go ig3rot + ig3s2p i1ha i8ham i8hans i1he i1hi ih1n ih1r i1hu i8hum ih1w 8i1i ii2s + ii2t i1j i1k i6kak i8kerz i6kes ik4ler i6k5unt 2il i5lac i1lag il3ans + i5las i1lau il6auf i1le ile8h i8lel il2fl il3ipp il6l5enn i1lo ilt8e + i1lu i1l� i8mart imb2 i8mele i8mid imme6l5a i1mu i1m� + i5m� ina5he i1nat in1au inau8s 8ind. in4d3an 5index ind2r 3indus + i5nec i2n1ei i8nerw 3infek 1info 5ingeni ing5s6o 5inhab ini5er. 5inj + in8k�t in8nan i1no inoi8d in3o4ku in5sau in1sp 5inspe 5instit + 5instru ins4ze 5intere 5interv in3the in5t2r i5ny in�2 i1n�r + in1�s in�8 in5�d i1n�s 2io io1a8 io1c iode4 io2di + ioi8 i1ol. i1om. i1on. i5onb ion2s1 i1ont i5ops i5o8pt i1or. + i3oral io3rat i5orc i1os. i1ot. i1o8x 2ip i1pa i1pi i1p2l i1pr i1q + i1ra ir6bl i1re i1ri ir8me8d ir2m1o2 ir8nak i1ro ir5rho ir6schl + ir6sch5r i5rus i5ry i5r� i1sa i8samt i6sar i2s1au i8scheh i8schei + isch5m isch3r isch�8 is8ele ise3ra i4s3erh is3err isi6de i8sind + is4kop ison5e is6por i8s5tum i5sty i5s� i1ta it5ab. i2t1a2m + i8tax i1te i8tersc i1thi i1tho i5thr it8h� i1ti i8ti8d iti6kl + itmen4 i1to i8tof it3ran it3rau i1tri itri5o it1sc it2se it5spa it8tru + i1tu it6z5erg it6z1w i1t� it�6r5e it�t2 it�ts5 + i1t� i1u iu6r 2i1v i6vad iva8tin i8vei i6v5ene i8verh i2vob i8vur + i1w iwi2 i5xa i1xe i1z ize8n i8zir i6z5w i�8m i1�6r + i5�t. 
i5�v i1�8 i�8 i6�5ers ja5la + je2t3r 6jm 5jo jo5as jo1ra jou6l ju5cha jugen4 jugend5 jung5s6 + + 3j� 1ka 8kachs 8kakz ka1la kal5d kam5t ka1na 2kanl 8kapf ka6pl + ka5r6a 6k3arbe ka1ro kar6p5f 4k3arti 8karz ka1r� kasi5e ka6teb + kat8ta kauf6s kau3t2 2k1b 2k1c 4k1d kehr6s kehrs5a 8keic 2k1eig 6k5ein + 6k5eis ke6lar ke8leis ke8lo 8kemp k5ente. k3entf 8k5ents 6kentz ke1ra + k5erlau 2k1f8 2k1g 2k1h ki5fl 8kik king6s5 6kinh ki5os ki5sp ki5th + 8ki8� 2k1k2 kl8 1kla 8klac k5lager kle4br k3leib 3kleid kle5isc + 4k3leit k3lek 6k5ler. 5klet 2klic 8klig k2lim k2lin 5klip 5klop k3lor + 1kl� 2k1m kmani5e kn8 6kner k2ni kn�8 1k2o ko1a2 ko6de. + ko1i koi8t ko6min ko1op ko1or ko6pht ko3ra kor6d5er ko5ru ko5t6sc k3ou + 3kow 6k5ox 2k1p2 k1q 1kr8 4k3rad 2k1rec 4k3reic kre5ie 2krib 6krig + 2krip 6kroba 2ks k1sa k6sab ksal8s k8samt k6san k1sc k2s1ex k5spat + k5spe k8spil ks6por k1spr kst8 k2s1uf 2k1t kta8l kt5a6re k8tein kte8re + k2t1h k8tinf kt3rec kt1s 1ku ku1ch kuck8 k3uhr ku5ie kum2s1 kunfts5 + kun2s kunst3 ku8rau ku4ro kurz1 + + 4kusti ku1ta ku8� + 6k1v 2k1w ky5n 2k1z 1k� k�4m 4k3�mi k�se5 1k� + k�1c k�1s 1k� k�1c k�r6sc + + 1la. + 8labf 8labh lab2r 2l1abs lach3r la8dr 5ladu 8ladv 6laff laf5t la2gn + 5laken 8lamb la6mer 5lampe. 2l1amt la1na 1land lan4d3a lan4d3r lan4gr + 8lanme 6lann 8lanw 6lan� 8lappa lap8pl lap6pr l8ar. la5ra lar4af + la8rag la8ran la6r5a6s l3arbe la8rei 6larm. la8sa la1sc la8sta lat8i + 6l5atm 4lauss 4lauto 1law 2lb l8bab l8bauf l8bede l4b3ins l5blo + lbst5an lbst3e 8lc l1che l8chert l1chi lch3m l5cho lch5w 6ld l4d3ei + ld1re l6d�b le2bl le8bre lecht6s5 led2r 6leff le4gas 1lehr lei6br + le8inf 8leinn 5leistu 4lektr le6l5ers lemo2 8lemp l8en. 8lends + 6lendun le8nend len8erw 6l5ents 4l3entw 4lentz 8lenzy 8leoz 6lepi + le6pip 8lepo 1ler l6er. 8lerbs 6l5erde le8reis le8rend le4r3er 4l3erg + l8ergr 6lerkl 6l5erzie 8ler� 8lesel lesi5e le3sko le3tha let1s + 5leuc 4leuro leu4s3t le5xe 6lexp l1f 2l1g lgend8 l8gh lglie3 lglied6 + 6l1h 1li li1ar li1as 2lick li8dr li1en lien6n li8ers li8ert 2lie� + 3lig li8ga8b li1g6n li1l8a 8limb li1na 4l3indu lings5 + 4l3inh 6linj link4s3 4linkt 2lint 8linv + + 4lipp 5lipt 4lisam + livi5e 6l1j 6l1k l8keim l8kj lk2l lko8f lkor8 lk2sa lk2se 6ll l1la + ll3a4be l8labt ll8anl ll1b ll1c ll1d6 l1le l4l3eim l6l5eise ller3a + l4leti l5lip l1lo ll3ort ll5ov ll6spr llte8 l1lu ll3urg l1l� + l5l� l6l�b 2l1m l6m5o6d 6ln l1na l1no 8lobl lo6br 3loch. + l5o4fen 5loge. 5lohn 4l3ohr 1lok l2on 4l3o4per lo1ra 2l1ord 6lorg + 4lort lo1ru 1los. 
lo8sei 3losig lo6ve lowi5 6l1p lp2f l8pho l8pn + lp4s3te l2pt l1q 8l1r 2ls l1sa l6sarm l1sc l8sec l6s5erg l4s3ers l8sh + l5s6la l1sp ls4por ls2pu l1str l8suni l1s� 2l1t lt5amp l4t3ein + l5ten l6t5eng l6t5erp l4t3hei lt3her l2t1ho l6t5i6b lti1l l8tr� + lt1sc lt6ser lt4s3o lt5ums lu8br lu2dr lu1en8 8lu8fe luft3a luf8tr + lu6g5r 2luh l1uhr lu5it 5luk 2l1umf 2l1umw 1lun 6l5u6nio 4l3unte lu5ol + 4lurg 6lurs l3urt lu4sto + lus1tr + lu6st5re lu8su lu6tal lu6t5e6g lu8terg + lu3the lu6t5or lu2t1r lu6�5 l1v lve5r6u 2l1w 1ly lya6 + 6lymp ly1no l8zess l8zo8f l3zwei lz5wu 3l�nd l�5on + l�6sc l�t1s 5l�uf 2l�ug l�u6s5c l�5v + l1�l 1l�s l�1�6t 6l1�be 1ma + 8mabg ma5chan mad2 ma5el 4magg mag8n ma1la ma8lau mal5d 8malde mali5e + malu8 ma8lut 2m1amp 3man mand2 man3ds 8mangr mani5o 8m5anst 6mappa + 4m3arbe mar8kr ma1r4o mar8schm 3mas ma1sc ma1t� 4m5auf ma5yo 2m1b + mb6r 2m1c 2m1d md6s� 1me me1ch me5isc 5meld mel8sa 8memp me5nal + men4dr men8schl men8schw 8mentsp me1ra mer4gl me1ro 3mes me6s5ei me1th + me8� 2m1f6 2m1g 2m1h 1mi mi1a mi6ale mi1la 2m1imm mi1na + mi5n� mi4s3an mit1h mi5t6ra 3mitt mitta8 mi6�5 6mj + 2m1k8 2m1l 2m1m m6mad m6m5ak m8menth m8mentw mme6ra m2mn mm5sp mm5ums + mmut5s m8m�n m1n8 m5ni 1mo mo5ar mo4dr 8mof mo8gal mo4kla mol5d + m2on mon8do mo4n3od + mon2s1tr + mont8a 6m5ony mopa6 mo1ra mor8d5a mo1sc mo1sp 5mot + moy5 2mp m1pa mpfa6 mpf3l mphe6 m1pi mpin6 m1pl mp2li m2plu mpo8ste + m1pr mpr�5 mp8th mput6 mpu5ts m1p� 8m1q 2m1r 2ms ms5au m1sc + msch4l ms6po m3spri m1str 2m1t mt1ar m8tein m2t1h mt6se mt8s� + mu5e 6m5uh mumi1 1mun mun6dr muse5e mu1ta 2m1v mvol2 mvoll3 2m1w 1my + 2m1z m�6kl 1m�n m�1s m�5tr m�u4s3c 3m�� + m�b2 6m�l 1m� 5m�n 3m�t 1na. + n5ab. 8nabn n1abs n1abz na6b� na2c nach3e 3nacht 1nae na5el + n1afr 1nag 1n2ah na8ha na8ho 1nai 6nair na4kol n1akt nal1a 8naly 1nama + na4mer na1mn n1amp 8n1amt 5nanc nan6ce n1and n6and. 2n1ang 1nani + 1nann n1ans 8nanw 5napf. 1n2ar. na2ra 2n1arc n8ard 1nari n8ark + 6n1arm 5n6ars 2n1art n8arv 6natm nat6s5e 1naue 4nauf n3aug 5naui n5auk + na5um 6nausb 6nauto 1nav 2nax 3naz 1na� n1b2 nbau5s n1c + nche5e nch5m 2n1d nda8d n2d1ak nd5ans n2d1ei nde8lac ndel6sa n8derhi + nde4se nde8stal n2dj ndnis5 n6d5or6t nd3rec nd3rot nd8samt nd6sau + ndt1h n8dumd 1ne ne5as ne2bl 6n5ebn 2nec 5neei ne5en ne1g4l 2negy + 4n1ein 8neis 4n3e4lem 8nemb 2n1emp nen1a 6n5energ nen3k 8nentb + 4n3en3th 8nentl 8n5entn 8n5ents ne1ra ne5r8al ne8ras 8nerbi 6n5erde. + nere5i6d nerfor6 6n5erh� 8nerl� 2n1err n8ers. 6n5ertra + 2n1erz nesi3e net1h neu4ra neu5sc 8neu� n1f nf5f nf2l + nflei8 nf5lin nft8st n8g5ac ng5d ng8en nge8ram ngg2 ng1h n6glic ng3rip + ng8ru ng2se4 ng2si n2g1um n1gy n8g�l n1h nhe6r5e 1ni ni1bl + ni5ch� ni8dee n6ie ni1en nie6s5te niet5h ni8etn 4n3i6gel n6ik + ni1la 2n1imp ni5na 2n1ind 8ninf 6n5inh ni8nit 6n5inn 2n1ins 4n1int + n6is + nis1tr + ni1th ni1tr n1j n6ji n8kad nk5ans n1ke n8kerla n1ki nk5inh + n5kl� n1k2n n8k5not nk3rot n8kr� nk5spo nk6t5r n8kuh + n6k�b n5l6 nli4mi n1m nmen4s n1na n8nerg nni5o n1no nn4t3ak nnt1h + nnu1e n1ny n1n� n1n� n1n� no5a no4b3la 4n3obs 2nobt + noche8 no6die no4dis no8ia no5isc 6n5o6leu no4mal noni6er 2n1onk n1ony + 4n3o4per 6nopf 6nopti no3ra no4ram nor6da 4n1org 2n1ort n6os no1st + 8nost. no8tan no8ter noty6pe 6n5ox n1p2 n1q n1r nr�s3 6ns n1sac + ns3ang n1sc n8self n8s5erf n8serg n6serk ns5erw n8sint n1s2pe n1spr + n6s5tat. 
+ + n6stob n1str n1ta n4t3a4go nt5anh nt3ark nt3art + n1te nt3eis nte5n6ar nte8nei nter3a nte6rei nt1ha nt6har n3ther nt5hie + n3thus n1ti nti1c n8tinh nti1t ntlo6b ntmen8 n1to nt3o4ti n1tr ntra5f + ntra5ut nt8rea nt3rec nt8rep n4t3rin nt8rop n4t3rot n4tr� nt1s + nts6an nt2sk n1tu nt1z n1t� n1t� n8t�l n1t� 1nu + nu1a nu5el nu5en 4n1uhr nu5ie 8numl 6n5ums 6n5umw 2n1und 6nuni 6n5unr + 2n1unt 2nup 2nu6r n5uri nu3skr nu5ta n1v 8n1w 1nys n1za n6zab n2z1ar + n6zaus nzi4ga n8zof n6z5unt n1zw n6zwir 1n�c 5n�e 5n�i + n8�l n�6m n�6re n5�rz 5n�us n1�l + 1n�t n5�z 5n�. 6n1�2b 5n�� + o5ab. oa2l o8ala o1a2m o1an ob1ac obe4ra o6berh 5o4bers o4beru + obe6ser 1obj o1bl o2bli ob5sk 3obst. ob8sta obst5re ob5sz o1che + oche8b o8chec o3chi och1l och3m ocho8f o3chro och3to o3chu och1w o1d + o2d1ag od2dr ode5i ode6n5e od1tr o5e6b o5e6der. oe8du o1ef o1e2l + o1e2p o1er. o5e8x o1fa of8fan 1offi of8fin of6f5la o5fla o1fr 8o1g + og2n o1ha o1he o6h5eis o1hi ohl1a oh1le oh4l3er 5ohm. oh2ni o1ho + oh1re oh1ru o1hu oh1w o1hy o1h� o5ia o1id. o8idi oi8dr o5ids + o5isch. oiset6 o1ism o3ist. o5i6tu o1j o1k ok2l ok3lau o8kl� + 1okta o1la old5am old5r o1le ole5in ole1r ole3u ol6gl ol2kl olk4s1 + ol8lak ol8lauf. ol6lel ol8less o1lo + ol1s ol2ster + ol6sk o1lu oly1e2 5olym + o2mab om6an o8mau ombe4 o8merz om5sp o1mu o8munt o1m� o1m� + o1na ona8m on1ax on8ent o6n5erb 8oni oni5er. on1k on6n5a6b o1no ono1c + o4nokt 1ons onts8 o1n� oo8f 1oog oo2pe oo2sa o1pa 3o4pera o3pfli + opf3lo opf3r o1pi o1pl o2pli o5p6n op8pa op6pl o1pr o3p4ter 1opti + o1p� o5p� o1q o1ra. o3rad o8radd 1oram o6rang o5ras o8rauf + or5cha or4d3a4m or8dei or8deu 1ordn or4dos o1re o5re. ore2h o8r5ein + ore5isc or6enn or8fla or8fli 1orga 5orgel. or2gl o1ri 5o6rient or8nan + or8n� o1ro or1r2h or6t5an or8tau or8tere o1rus o1ry o1r� + or1�2 o1sa osa3i 6ose o8serk o1sk o6ske o6ski os2kl os2ko os2kr + osni5e o2s1o2d o3s4per o4stam o6stau o3stra ost3re osu6 o6s5ur o5s6ze + o1ta ot3auf o6taus o1te o6terw o1th othe5u o2th1r o1ti o1to oto1a + ot1re o1tri o1tro ot1sc o3tsu ot6t5erg ot2t3h ot2t5r ot8t� o1tu + ou3e ouf1 ou5f6l o5u6gr ou5ie ou6rar ou1t6a o1v o1wa o1we o6wer. o1wi + owid6 o1wo o5wu o1xe oy5al. oy1e oy1i o5yo o1z oza2r 1o2zea ozo3is + o�8 o�5elt o�1t 3paa pa6ce 5pad pag2 1pak + pa1la pa8na8t pani5el pa4nor pan1s2 1pap pap8s pa8rei par8kr paro8n + par5o6ti part8e 5partei 3partn pas6sep pa4tha 1pau 6paug pau3sc p1b + 8p5c 4p1d 1pe 4peic pe5isc 2pek pen3k pen8to8 p8er pe1ra pere6 per5ea + per5eb pe4rem 2perr per8ran 3pers 4persi pe3r� pe4sta pet2s + p2f1ec p4fei pf1f pf2l 5pflanz pf8leg pf3lei 2pft pf3ta p1g 1ph 2ph. + 2p1haf 6phb 8phd 6p5heit ph5eme 6phg phi6e 8phk 6phn p5holl pht2 + ph3tha 4ph3the phu6 6phz pi1en pi5err pi1la pi1na 5pinse pioni8e 1pis + pi1s2k pi1th p1k pl8 5pla p2lau 4plei p3lein 2pler 6p5les 2plig p6lik + 6p5ling p2liz plo8min 6p1m p1n 1p2o 8poh 5pol po8lan poly1 po3ny po1ra + 2porn por4t3h po5r� 5poti p1pa p6p5ei ppe6la pp5f p2p1h p1pi pp1l + ppp6 pp5ren + pp1s pp2ste + p5p� pr6 3preis 1pres 2p3rig 5prinz 1prob 1prod + 5prog pro8pt pro6t5a prote5i 8pro� pr�3l 1pr�s + pr�te4 1pr�f p5schl 2pst 1p2sy p1t p8to8d pt1s 5p6ty 1pu + pu1b2 2puc pu2dr puf8fr 6p5uh pun8s pu8rei pu5s6h pu1ta p1v p3w 5py + py5l p1z p�6der p5�6m p�8nu 8p�r p�t5h + p�t1s qu6 1qui 8rabk ra6bla 3rable ra2br r1abt 6rabz ra4dan ra2dr + 5rafal ra4f3er ra5gla ra2g3n 6raha ral5am 5rald 4ralg ra8lins 2rall + ral5t 8ramei r3anal r6and ran8der ran4dr 8ranf 6ranga 5rangi ran8gli + r3angr rans5pa 8ranw r8anz. 
ra5or 6rapf ra5pl rap6s5er 2r1arb 1rarh + r1arm ra5ro 2r1art 6r1arz ra8tei ra6t5he 6ratl ra4t3ro r5atta raue4n + 6raus. r5austa rau8tel raut5s ray1 r1b rb5lass r6bler rb4lie rbon6n + r8brecht rb6s5t� r8ces r1che rch1l rch3m rch3re rch3tr rch1w 8rd + r1da r8dachs r8dap rda5ro rde5ins rdio5 r8dir rd3ost r1dr r8drau 1re. + re1ak 3reakt re3als re6am. re1as 4reben re6bl rech5a r8edi re3er + 8reff 3refl 2reh 5reha r4ei. reich6s5 8reier 6reign re5imp 4r3eina + 6r3einb 6reing 6r5einn 6reinr 4r3eins r3eint reli3e 8r5elt 6rempf + 2remt ren5a6b ren8gl r3enni 1reno 5rente 4r3enth 8rentl 4r3entw 8rentz + ren4zw re1on requi5 1rer rer4bl 6rerbs 4r3erd 8rerh� 8rerkl + 4r3erla 8rerl� 4r3erns 6r5ern� rer5o 6r5erreg r5ertr r5erwec + r5er� re2sa re8schm 2ress re5u8ni 6rewo 2r1ex r1f r8ferd rf4lie + 8r1g r8gah rge4bl rge5na rgest4 rg6ne r2gni2 r8gob r4g3ret rg8sel r1h8 + r2hy 5rhyt ri1ar ri5cha rid2g r2ie rieg4s5 ri8ei ri1el ri6ele ri1en + ri3er. ri5ers. ri6fan ri8fer ri8fr 1r2ig ri8kn ri5la rim�8 + ri1na r8inde rin4ga rin6gr 1rinn 6rinner rino1 r8insp 4rinst + ri1n� ri5o6ch ri1o2d ri3o6st 2r1ir r2is ri3sko ri8spr + + ri5sv r2it 6r5i6tal ri5tr ri6ve. 8r1j 6rk r1ke rkehrs5 r1ki r3klin + r1k2n rk3str rk4t3an rk6to r6kuh rk�4s3t r1l r5li rline5a 6r1m + r6manl rma4p r4m3aph r8minf r8mob rm5sa 2rn r1na rna8be r5ne rn2ei + r6neif r6nex r6nh rn1k r1no r6n5oc rn1sp r1n� r1n� ro6bern + 6robs ro1ch 3rock. ro5de ro1e 4rofe ro8hert 1rohr ro5id ro1in ro5isc + 6rolym r2on 6roog ro6phan r3ort ro1s2p ro5s6w ro4tau ro1tr ro6ts 5rout + r1p rpe8re rp2f r2ps r2pt r1q 2rr r1ra r1re rrer6 + rr6hos r5rh� + r1ri r1ro rro8f rr8or rror5a r1ru r3ry r1r� r1r� r1r� + 2r1s + r2ste r2sti + r6sab r4sanf rse6e rse5na r2sh r6ska r6ski rs2kl r8sko r2sl rs2p + r6stauf r8sterw r8stran rswi3d4 r2sz 2r1t rt3art r8taut r5tei rt5eige + r8tepe r4t3erh r8terla r4t3hei r5t6hu r4t3int rt5reif rt1sc rt6ser + rt6s5o rt6s5u rt5und r8turt rube6 ru1en 1r4uf ruf4st ru1ie 2r1umg + 2r1uml 2rums run8der run4d5r 6rundz 6runf 8runs 2r1unt 2r1ur r6us + ru6sta + rus1tr + ru6tr 1ruts r1v rven1 rvi2c r1w r1x r1za rz5ac r6z5al + r8z1ar r8zerd r6z5erf rz8erh rz4t3h r8zum r�4ste r�u8sc + r1�f 5r�hr r�5le 3r�ll 5r�mis r1�r + r�2sc 3r�mp 1sa. 1saa s3a4ben sa2bl 2s1abs 6s1abt 6sabw + 3sack. 6s3a4der 1saf sa1fa 4s1aff sa5fr 1sag 1sai sa1i2k1 4s1akt 1sal + sa1la 4s3alpi 6salter salz3a 1sam s5anb san2c 1sand s5angeh 6sanl + 2s1ans 6s3antr 8s1anw s1ap s6aph 8sapo sap5p6 s8ar. 2s1arb 3sarg + s1arm sa5ro 2s1art 6s1arz 1sas 1sat sat8a 2s1atl sa8tom 3s8aue s5auff + sau5i s6aur 2s1aus 5s6ause 2s1b2 2sca s4ce 8sch. 3scha. 5schade + 3schaf 3schal sch5ame 8schanc 8schb 1sche 6schef 8schex 2schf 2schg + 2schh 1schi 2schk 5schlag 5schlu 6schm�� + 6schna� 1scho 6schord 6schp 3schri 8schric 8schrig + 8schrou 6schs 2scht sch3ta sch3tr 1schu 8schunt 6schv 2schz 5sch� + 5sch� 2sco scre6 6scu 2s1d 1se se5an se1ap se6ben se5ec see5i6g + se3erl 8seff se6han se8hi se8h� 6s5eid. 2s1eig s8eil 5sein. + sei5n6e 6s5einh 3s8eit 3sel. se4lar selb4 6s3e4lem se8lerl 2s1emp + sen3ac se5nec 6s5ents 4sentz s8er. se8reim ser5inn 8serm� + 8s5erzi 6ser�f se1um 8sexa 6sexp 2s1f2 sfal8ler 2s3g2 sge5b2 s1h + s8hew 5s6hip 5s4hop 1si 2siat si1b sicht6s 6s5i6dee siege6s5 si1en + si5err si1f2 si1g2n si6g5r si8kau sik1i si4kin si2kl si8k� si1la + sil6br si1na 2s1inf sin5gh 2s1inh sinne6s5 2s1ins si5ru si5str 4s1j + s1k2 6sk. 2skau skel6c skelch5 s6kele 1s2ki. 3s4kin. 
s6kiz s8kj + 6skn 2skow 3skrib 3skrip 2sku 8sk� s1l s8lal slei3t s4low 2s1m + s1n 6sna 6snot 1so so1ch 2s1odo so4dor 6s5o4fen solo3 s2on so5of 4sope + so1ra 2s1ord 4sorga sou5c so3un 4s3ox sp2 8spaa 5spal 1span 2spap + s2pec s4peis 1spek s6perg 4spers s6pes 2s1pf 8sphi 1s2ph� 1spi + spi4e 6s5pig 6spinse 2spis 2spla 2spol 5s6pom 6s5pos 6spoti 1spra + 3s8prec 6spreis 5spring 6sprob 1spru s2pul 1s2pur 6spy 5sp�n + 1sp� s1q 2s1r + + + 2ssa 2sse 2ssi 2sso 2ss� 2ss� 2ss� 2s1sch + sse8nu ssini6s ssoi6r 2st. + 1sta 4stafe 2stag + sta3la 6stale + 4s2talg + 8stalk 8stamt 6st5anf 4stans 6stanw 6starb sta4te + 6staus 2stb 6stc 6std + s1te + 4steil + + 6steppi + + 8stesse 6stf 2stg 2sth st1ha st3hei s8t1hi st1ho st5hu + s1ti + s2ti4el + 4s2tigm + + 6s2tind + 4s2tinf + s2ti8r + 2stk 2stl 2stm + 1sto 6stoll. 4st3ope + 6stopf. 6stord 6stp + + 4strai + s3tral + 6s5traum 3stra� + 3strec 6s3tref 8streib 5streif 6streno 6stres 6strev + + 2st5rig + + 8s2t1ris + + s8troma st5rose 4struf 3strum + 6str�g 2st1s6 2stt + 1stu stu5a 4stuc 2stue 8stun. 2stv 2stw s2tyl + 6stz 1st� 8st�g + 1st� + 1st� 8st�ch 4st�r. + 1su su2b1 3suc su1e su2fe su8mar 6sumfa 8sumk 2s1unt sup1p2 6s5u6ran + 6surte 2s1v 2s1w 1sy 8syl. sy5la syn1 sy2na syne4 s1z s4zend 5s6zene. + 8szu 1s� 6s5�nd 6s�ugi 6s�u� + 5s�m 2s1�2b 1s�c s�8di 1s�n 5s�� + taats3 4tab. taba6k ta8ban tab2l ta6bre 4tabs t3absc + 8tabz 6t3acht ta6der 6tadr tad6s tad2t 1tafe4 1tag ta6ga6 ta8gei + tage4s tag6s5t tah8 tahl3 tai6ne. ta5ir. tak8ta tal3au 1tale ta8leng + tal5ert 6t5a6mer 6tamp tampe6 2t1amt tan5d6a tan8dr tands5a tani5e + 6tanl 2tanr t3ans 8t5antr tanu6 t5anw 8tanwa tan8zw ta8rau 6tarbe + 1tari 2tark 2t1arm ta1ro 2tart t3arti 6tarz ta1sc ta6sien ta8stem + ta8sto t5aufb 4taufn 8taus. 5tause 8tausf 6tausg t5ausl 2t1b2 2t1c + t6chu 2t1d te2am tea4s te8ben 5techn 4teff te4g3re te6hau 2tehe te4hel + 2t1ehr te5id. teig5l 6teign tei8gr 1teil 4teinh t5einhe 4teis t5eisen + 8teiw te8lam te4lar 4telek 8telem te6man te6n5ag ten8erw ten5k tens4p + ten8tro 4t3entw 8tentz te6pli 5teppi ter5a6b te3ral ter5au 8terbar + t5erbe. 6terben 8terbs 4t3erbt t5erde. ter5ebe ter5ein te8rers terf4 + 8terh� 6terkl� ter8nor ter6re. t8erscha t5e6sel te8stau + t3euro te1xa tex3e 8texp tex6ta 2t1f2 2t1g2 2th. th6a 5tha. 2thaa + 6t1hab 6t5haf t5hah 8thak 3thal. 6thals 6t3hand 2t1hau 1the. 3t4hea + t1heb t5heil t3heit t3helf 1theo 5therap 5therf 6t5herz 1thes 1thet + 5thi. 2t1hil t3him 8thir 3this t5hj 2th1l 2th1m th1n t5hob t5hof + 4tholz 6thopti 1thr6 4ths t1hum 1thy 4t1h� 2t1h� t1h� + ti1a2m ti1b tie6fer ti1en ti8gerz tig3l ti8kin ti5lat 1tilg t1ind + tin4k3l ti3spa ti5str 5tite ti5tr ti8vel ti8vr 2t1j 2t1k2 2t1l tl8a + 2t1m8 2t1n 3tobe 8tobj to3cha 5tocht 8tock tode4 to8del to8du to1e + 6t5o6fen to1in toi6r 5toll. to8mene t2ons 2t1ony to4per 5topf. 6topt + to1ra + to1s to2ste + to6ska tos2l 2toti to1tr t8ou 2t1p2 6t1q tr6 tra5cha + tra8far traf5t 1trag tra6gl tra6gr t3rahm 1trai t6rans tra3sc tra6st + 3traue t4re. 2trec t3rech t8reck 6t1red t8ree 4t1reg 3treib 4treif + 8t3reis 8trepo tre6t5r t3rev 4t3rez 1trib t6rick tri6er 2trig t8rink + tri6o5d trizi5 tro1a 3troc trocke6 troi8d tro8man. tro3ny 5tropf + 6t5rosa t5ro� 5trub 5trup trut5 1tr�g 6t1r�h + 5tr�b tr�3bu t1r�c t1r�s 2ts ts1ab t1sac tsa8d + ts1ak t6s5alt ts1an ts1ar ts3auf t3schr t5sch� tse6e tsee5i + tsein6s ts3ent ts1er t8serf t4serk t8sh 5t6sik t4s3int ts5ort. + t5s6por t6sprei + t1st t2ste + t6s5tanz ts1th t6stit t4s3tor 1t2sua t2s1uf + t8sum. 
t2s1u8n t2s1ur 2t1t tt5eif tte6sa tt1ha tt8ret tt1sc tt8ser + tt5s6z 1tuc tuch5a 1tu1e 6tuh t5uhr tu1i tu6it 1tumh 6t5umr 1tums + 8tumt 6tund 6tunf 2t1unt tu5ra tu6rau tu6re. tu4r3er 2t1v 2t1w 1ty1 + ty6a ty8la 8tym 6ty6o 2tz tz5al tz1an tz1ar t8zec tzeh6 tzehn5 t6z5ei. + t6zor t4z3um t6z�u 5t�g 6t�h t5�lt t8�n + t�re8 8t�8st 6t�u� t5�ffen + 8t�8k 1t�n 4t�b t6�5ber. 5t�ch 1t�r. + u3al. u5alb u5alf u3alh u5alk u3alp u3an. ua5na u3and u5ans u5ar. + ua6th u1au ua1y u2bab ubi5er. u6b5rit ubs2k u5b� u8b�b 2uc + u1che u6ch5ec u1chi uch1l uch3m uch5n uch1r uch5to ucht5re u1chu uch1w + uck1a uck5in u1d ud4a u1ei u6ela uene8 u6ep u1er uer1a ue8rerl uer5o + u8esc u2est u8ev u1fa u2f1ei u4f3ent u8ferh uf1fr uf1l uf1ra uf1re + uf1r� uf1r� uf1s2p uf1st uft1s u8gabt u8gad u6gap ugeb8 u8gn + ugo3s4 u1ha u1he u1hi uh1le u1ho uh1re u1hu uh1w u1h� u1h� + 6ui ui5en u1ig u3ins uin8tes u5isch. u1j 6uk u1ke u1ki u1kl u8klu + u1k6n u5ky u1la uld8se u1le ul8lac ul6lau ul6le6l ul6lo ulni8 u1lo + ulo6i ult6a ult8e u1lu ul2vr u1l� u1l� 3umfan 5umlau umo8f + um8pho u1mu umu8s u5m� u1n1a un2al un6at unau2 6und. 5undein + un4d3um 3undzw und�8 un8d�b une2b un1ec une2h un3eis 3unfal + 1unf� 5ungea 3ungl� ung2s1 un8g� 1u2nif un4it un8kro + unk5s u1no unpa2 uns2p unvol4 unvoll5 u5os. u1pa u1pi u1p2l u1pr + up4s3t up2t1a u1q u1ra ur5abs ura8d ur5ah u6rak ur3alt u6rana u6r5ans + u8rap ur5a6ri u8ratt u1re ur3eig ur8gri u1ri ur5ins 3urlau urmen6 + ur8nan u1ro 3ursac ur8sau ur8sei ur4sk 3urtei u1ru uru5i6 uru6r u1ry + ur2za ur6z� ur5�6m u5r� u1r� ur�ck3 u1sa + usa4gi u2s1ar u2s1au u8schec usch5wi u2s1ei use8kel u8sl u4st3a4b + us3tau + + u2s1uf u8surn ut1ac u1tal uta8m u1tan ut1ar u1tas ut1au + u1te u8teic u4tent u8terf u6terin u4t3hei ut5ho ut1hu u1ti utine5 + uti6q u1to uto5c u1tr ut1sa ut1s6p ut6stro u1tu utz5w u1u u1v uve5n + uve3r4� u1w u1xe u5ya uy5e6 u1yi u2z1eh u8zerh u5� u�e6n + u�en5e 8vanb 6vang 6varb var8d va6t5a va8tei + va2t1r 2v1b 6v5c 6vd 1ve 6ve5g6 ver1 ver5b verb8l ve2re2 verg8 ve2ru8 + ve1s ve2s3p ve3xe 2v1f 2v1g 6v5h vi6el vie6w5 vi1g4 vi8leh vil6le. + 8vint vi1ru vi1tr 2v1k 2v1l 2v1m 4v5n 8vo8f voi6le vol8lend vol8li + v2or1 vo2re vo8rin vo2ro 2v1p 8vra v6re + 2v2s + 2v1t 2v1v 4v3w 2v1z + waffe8 wa6g5n 1wah wah8n wa5la wal8din wal6ta wan4dr 5ware wa8ru + war4za 1was w5c w1d 5wech we6fl 1weg we8geng weg5h weg3l we2g1r + weh6r5er 5weise weit3r wel2t welt3r we6rat 8werc 5werdu wer4fl 5werk. + wer4ka wer8ku wer4ta wer8term we2sp + we8s4tend + + we8str + we8st� wet8ta wich6s5t 1wid wi2dr wiede4 wieder5 wik6 wim6ma + win4d3r 5wirt wisch5l 1wj 6wk 2w1l 8w1n wo1c woche6 wol6f wor6t5r 6ws2 + w1sk 6w5t 5wunde. wun6gr wu1sc wu2t1 6w5w wy5a w�rme5 w�1sc + 1xag x1ak x3a4men 8xamt x1an 8x1b x1c 1xe. x3e4g 1xen xe1ro x1erz + 1xes 8xf x1g 8x1h 1xi 8xid xi8so 4xiste x1k 6x1l x1m 8xn 1xo 8x5o6d + 8x3p2 x1r x1s6 8x1t x6tak x8terf x2t1h 1xu xu1e x5ul 6x3w x1z 5ya. + y5an. y5ank y1b y1c y6cha y4chia y1d yen6n y5ern y1g y5h y5in y1j + y1k2 y1lak yl1al yla8m y5lax y1le y1lo y5lu y8mn ym1p2 y3mu y1na yno2d + yn1t y1on. y1o4p y5ou ypo1 y1pr y8ps y1r yri3e yr1r2 + + ys5iat ys8ty + y1t y3w y1z y�8m z5a6b zab5l 8za6d 1zah za5is 4z3ak 6z1am 5zange. + 8zanl 2z1ara 6z5as z5auf 3zaun 2z1b 6z1c 6z1d 1ze ze4dik 4z3eff 8zein + zei4ta zei8ters ze6la ze8lec zel8th 4zemp 6z5engel zen8zin 8zerg� + zer8i ze1ro zers8 zerta8 zer8tab zer8tag 8zerz ze8ste zeu6gr 2z1ex + 2z1f8 z1g 4z1h 1zi zi1en zi5es. 
4z3imp zi1na 6z5inf 6z5inni zin6s5er + 8zinsuf zist5r zi5th zi1tr 6z1j 2z1k 2z1l 2z1m 6z1n 1zo zo6gl 4z3oh + zo1on zor6na8 4z1p z5q 6z1r 2z1s8 2z1t z4t3end z4t3hei z8thi 1zu zu3al + zu1b4 zu1f2 6z5uhr zun2a 8zunem zunf8 8zungl zu1o zup8fi zu1s8 zu1z + 2z1v zw8 z1wal 5zweck zwei3s z1wel z1wer z6werg 8z5wes 1zwi zwi1s + 6z1wo 1zy 2z1z zz8a zzi1s 1z� 1z� 6z�l. z�1le + 1z� 2z1�2b �1a6 �b1l �1che �3chi + �ch8sc �ch8sp �5chu �ck5a �d1a �d5era + �6d5ia �1e �5fa �f1l �ft6s �g1h + �g3le �6g5nan �g5str �1he �1hi �h1le + �h5ne 1�hnl �h1re �h5ri �h1ru �1hu + �h1w 6�i �1isc �6ische �5ism �5j + �1k �l1c �1le �8lei �l6schl �mi1e + �m8n �m8s �5na 5�nderu �ne5i8 �ng3l + �nk5l �1no �n6s5c �1pa �p6s5c 3�q + �r1c �1re �re8m 5�rgern �r6gl �1ri + 3�rmel �1ro �rt6s5 �1ru 3�rztl �5r� + �6s5chen �sen8s �s1th �ta8b �1te �teri4 + �ter5it �6thy �1ti 3�tk �1to �t8schl + �ts1p �5tu �ub1l �u1e 1�ug �u8ga + �u5i �1um. �1us. 1�u� �1z + �1b �1che �5chi + �ch8s2tei + �ch8str �cht6 + 5�6dem 5�ffn �1he �h1l8 �h1re �1hu + �1is �1ke 1�2ko 1�l. �l6k5l �l8pl + �1mu �5na �nig6s3 �1no �5o6t �pf3l + �p6s5c �1re �r8gli �1ri �r8tr �1ru + 5�sterr �1te �5th �1ti �1tu �1v �1w + �we8 �2z �b6e2 3�4ber1 �b1l �b1r + 5�2bu �1che �1chi �8ch3l �ch6s5c �8ck + �ck1a �ck5ers �d1a2 �6deu �di8t �2d1o4 + �d5s6 �ge4l5a �g1l �h5a �1he �8heh + �6h5erk �h1le �h1re �h1ru �1hu �h1w + �3k �1le �l4l5a �l8lo �l4ps �l6s5c + �1lu �n8da �n8fei �nk5l �n8za �n6zw + �5pi �1re �8rei �r8fl �r8fr �r8geng + �1ri �1ro �r8sta + + �1ru �se8n + �8sta �8stes + + �3ta �1te �1ti + �t8tr �1tu �t8zei �1v �1a8 5�a. + �8as �1b8 �1c �1d + 1�e �5ec 8�e8g 8�e8h + 2�1ei 8�em �1f8 �1g �1h + 1�i �1k �1l �1m + + �1n �1o �1p8 �5q + �1r �1s2 �st8 �1ta + �1te �t3hei �1ti �5to + �1tr 1�u8 6�5um �1v �1w + �1z + + 2s1ta. + i2s1tal + 2s1tani 2s1tan. + fe2s1ta + te2s1ta + + nd2ste + ve2ste + 3s2tec + 4s3techn + 3s2teg + 3s2teh + 3s2tein 3s2teig 3s2teif + 3s2tell 3s2telz + a4s3tel + 3s2temm + 3s2temp + 3s2tep + s3s2ter t3s2tern + 3s2teue + 6s4teuro + + bs2ti + te2s3ti + ve2sti + 3s2tic + + 3s2tieb + 3s2tieg + + 3s2tif + 3s2til + 3s2tim + 3s2tink + 3s2titu + + a2s1to + gu2s1to + ku2s1to + i2s1tol i2s1tor + ve2s1to + + 2s1tung + 2s7tus + o2s1tul + + + + aus3s4 + ens3s4 + gs3s4 + .mis2s1 + s2s1b8 + + s2s3chen + s2s3d + s2s5ec + + + 2s2s1ei + s2s3f + s2s1g + s2s3h + s2s3k + s2s3l + s2s3m + + s2s3n + s2s3p8 + s2s5q + s2s3r + s2s3s2 + sss2t8 + + + as2s3te + is2s3te + us2s3te + �s2s3te + s2st3hei + s2s3ti + s2s1to + s2s1tr + + 6ss5um + s2s3v + s2s3w + s2s3z + + + + 1cker. + 1ckert + 1ckad + 1cke. + 1ckel + 1cken + 4ck1ent + 1ckere + 1ckern + 1ckeru + 1ckie + 1ckig + 1ckun + + + diff --git a/modules/data-streams/build.gradle b/modules/data-streams/build.gradle index d5ce1bfc8d93d..b017ae9921b0e 100644 --- a/modules/data-streams/build.gradle +++ b/modules/data-streams/build.gradle @@ -1,4 +1,3 @@ -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.test-with-dependencies' @@ -28,14 +27,14 @@ tasks.withType(StandaloneRestIntegTestTask).configureEach { usesDefaultDistribution() } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // These fail in CI but only when run as part of checkPart2 and not individually. 
  // Tracked in :
  tasks.named("javaRestTest").configure{enabled = false }
  tasks.named("yamlRestTest").configure{enabled = false }
 }
 
-if (BuildParams.isSnapshotBuild() == false) {
+if (buildParams.isSnapshotBuild() == false) {
   tasks.withType(Test).configureEach {
     systemProperty 'es.failure_store_feature_flag_enabled', 'true'
   }
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/FailureStoreMetricsWithIncrementalBulkIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/FailureStoreMetricsWithIncrementalBulkIT.java
new file mode 100644
index 0000000000000..2c9b7417b2832
--- /dev/null
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/FailureStoreMetricsWithIncrementalBulkIT.java
@@ -0,0 +1,251 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.datastreams;
+
+import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.bulk.FailureStoreMetrics;
+import org.elasticsearch.action.bulk.IncrementalBulkService;
+import org.elasticsearch.action.bulk.IndexDocFailureStoreStatus;
+import org.elasticsearch.action.datastreams.CreateDataStreamAction;
+import org.elasticsearch.action.datastreams.GetDataStreamAction;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
+import org.elasticsearch.cluster.metadata.DataStream;
+import org.elasticsearch.cluster.metadata.Template;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.core.AbstractRefCounted;
+import org.elasticsearch.core.Releasable;
+import org.elasticsearch.core.Strings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.IndexingPressure;
+import org.elasticsearch.index.mapper.DateFieldMapper;
+import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.plugins.PluginsService;
+import org.elasticsearch.telemetry.Measurement;
+import org.elasticsearch.telemetry.TestTelemetryPlugin;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo;
+import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.DEFAULT_TIMESTAMP_FIELD;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class FailureStoreMetricsWithIncrementalBulkIT extends ESIntegTestCase {
+
+    private static final List<String> METRICS = List.of(
+        FailureStoreMetrics.METRIC_TOTAL,
+        FailureStoreMetrics.METRIC_FAILURE_STORE,
+        FailureStoreMetrics.METRIC_REJECTED
+    );
+
+    private static final String DATA_STREAM_NAME = "data-stream-incremental";
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return List.of(DataStreamsPlugin.class, TestTelemetryPlugin.class, MapperExtrasPlugin.class);
+    }
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
+        return Settings.builder()
+            .put(super.nodeSettings(nodeOrdinal, otherSettings))
+            .put(IndexingPressure.SPLIT_BULK_LOW_WATERMARK.getKey(), "512B")
+            .put(IndexingPressure.SPLIT_BULK_LOW_WATERMARK_SIZE.getKey(), "2048B")
+            .put(IndexingPressure.SPLIT_BULK_HIGH_WATERMARK.getKey(), "2KB")
+            .put(IndexingPressure.SPLIT_BULK_HIGH_WATERMARK_SIZE.getKey(), "1024B")
+            .build();
+    }
+
+    public void testShortCircuitFailure() throws Exception {
+        createDataStreamWithFailureStore();
+
+        String coordinatingOnlyNode = internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
+
+        AbstractRefCounted refCounted = AbstractRefCounted.of(() -> {});
+        IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class, coordinatingOnlyNode);
+        try (IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest()) {
+
+            AtomicBoolean nextRequested = new AtomicBoolean(true);
+            int successfullyStored = 0;
+            while (nextRequested.get()) {
+                nextRequested.set(false);
+                refCounted.incRef();
+                handler.addItems(List.of(indexRequest(DATA_STREAM_NAME)), refCounted::decRef, () -> nextRequested.set(true));
+                successfullyStored++;
+            }
+            assertBusy(() -> assertTrue(nextRequested.get()));
+            var metrics = collectTelemetry();
+            assertDataStreamMetric(metrics, FailureStoreMetrics.METRIC_TOTAL, DATA_STREAM_NAME, successfullyStored);
+            assertDataStreamMetric(metrics, FailureStoreMetrics.METRIC_FAILURE_STORE, DATA_STREAM_NAME, 0);
+            assertDataStreamMetric(metrics, FailureStoreMetrics.METRIC_REJECTED, DATA_STREAM_NAME, 0);
+
+            // Introduce artificial pressure that will reject the following requests
+            String node = findNodeOfPrimaryShard(DATA_STREAM_NAME);
+            IndexingPressure primaryPressure = internalCluster().getInstance(IndexingPressure.class, node);
+            long memoryLimit = primaryPressure.stats().getMemoryLimit();
+            long primaryRejections = primaryPressure.stats().getPrimaryRejections();
+            try (Releasable ignored = primaryPressure.markPrimaryOperationStarted(10, memoryLimit, false)) {
+                while (primaryPressure.stats().getPrimaryRejections() == primaryRejections) {
+                    while (nextRequested.get()) {
+                        nextRequested.set(false);
+                        refCounted.incRef();
+                        List<DocWriteRequest<?>> requests = new ArrayList<>();
+                        for (int i = 0; i < 20; ++i) {
+                            requests.add(indexRequest(DATA_STREAM_NAME));
+                        }
+                        handler.addItems(requests, refCounted::decRef, () -> nextRequested.set(true));
+                    }
+                    assertBusy(() -> assertTrue(nextRequested.get()));
+                }
+            }
+
+            while (nextRequested.get()) {
+                nextRequested.set(false);
+                refCounted.incRef();
+                handler.addItems(List.of(indexRequest(DATA_STREAM_NAME)), refCounted::decRef, () -> nextRequested.set(true));
+            }
+
+            assertBusy(() -> assertTrue(nextRequested.get()));
+
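+            // Sending the last items completes the bulk request; the returned BulkResponse
+            // covers every document added over the lifetime of the handler.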
+            PlainActionFuture<BulkResponse> future = new PlainActionFuture<>();
+            handler.lastItems(List.of(indexRequest(DATA_STREAM_NAME)), refCounted::decRef, future);
+
+            BulkResponse bulkResponse = safeGet(future);
+
+            for (int i = 0; i < bulkResponse.getItems().length; ++i) {
+                // the first requests were successful
+                boolean hasFailed = i >= successfullyStored;
+                assertThat(bulkResponse.getItems()[i].isFailed(), is(hasFailed));
+                assertThat(bulkResponse.getItems()[i].getFailureStoreStatus(), is(IndexDocFailureStoreStatus.NOT_APPLICABLE_OR_UNKNOWN));
+            }
+
+            metrics = collectTelemetry();
+            assertDataStreamMetric(metrics, FailureStoreMetrics.METRIC_TOTAL, DATA_STREAM_NAME, bulkResponse.getItems().length);
+            assertDataStreamMetric(
+                metrics,
+                FailureStoreMetrics.METRIC_REJECTED,
+                DATA_STREAM_NAME,
+                bulkResponse.getItems().length - successfullyStored
+            );
+            assertDataStreamMetric(metrics, FailureStoreMetrics.METRIC_FAILURE_STORE, DATA_STREAM_NAME, 0);
+        }
+    }
+
+    private void createDataStreamWithFailureStore() throws IOException {
+        TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request(
+            "template-incremental"
+        );
+        request.indexTemplate(
+            ComposableIndexTemplate.builder()
+                .indexPatterns(List.of(DATA_STREAM_NAME + "*"))
+                .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, true))
+                .template(new Template(null, new CompressedXContent("""
+                    {
+                      "dynamic": false,
+                      "properties": {
+                        "@timestamp": {
+                          "type": "date"
+                        },
+                        "count": {
+                          "type": "long"
+                        }
+                      }
+                    }"""), null))
+                .build()
+        );
+        assertAcked(safeGet(client().execute(TransportPutComposableIndexTemplateAction.TYPE, request)));
+
+        final var createDataStreamRequest = new CreateDataStreamAction.Request(
+            TEST_REQUEST_TIMEOUT,
+            TEST_REQUEST_TIMEOUT,
+            DATA_STREAM_NAME
+        );
+        assertAcked(safeGet(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest)));
+    }
+
+    private static Map<String, List<Measurement>> collectTelemetry() {
+        Map<String, List<Measurement>> measurements = new HashMap<>();
+        for (PluginsService pluginsService : internalCluster().getInstances(PluginsService.class)) {
+            final TestTelemetryPlugin telemetryPlugin = pluginsService.filterPlugins(TestTelemetryPlugin.class).findFirst().orElseThrow();
+
+            telemetryPlugin.collect();
+
+            for (String metricName : METRICS) {
+                measurements.put(metricName, telemetryPlugin.getLongCounterMeasurement(metricName));
+            }
+        }
+        return measurements;
+    }
+
+    private void assertDataStreamMetric(Map<String, List<Measurement>> metrics, String metric, String dataStreamName, int expectedValue) {
+        List<Measurement> measurements = metrics.get(metric);
+        assertThat(measurements, notNullValue());
+        long totalValue = measurements.stream()
+            .filter(m -> m.attributes().get("data_stream").equals(dataStreamName))
+            .mapToLong(Measurement::getLong)
+            .sum();
+        assertThat(totalValue, equalTo((long) expectedValue));
+    }
+
+    private static IndexRequest indexRequest(String dataStreamName) {
+        String time = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(System.currentTimeMillis());
+        String value = "1";
+        return new IndexRequest(dataStreamName).opType(DocWriteRequest.OpType.CREATE)
+            .source(Strings.format("{\"%s\":\"%s\", \"count\": %s}", DEFAULT_TIMESTAMP_FIELD, time, value), XContentType.JSON);
+    }
+
+    protected static String findNodeOfPrimaryShard(String dataStreamName) {
+        GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(
+            TEST_REQUEST_TIMEOUT,
+            new String[] { dataStreamName }
+        );
+        GetDataStreamAction.Response getDataStreamResponse =
+            safeGet(client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest));
+        assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1));
+        DataStream dataStream = getDataStreamResponse.getDataStreams().getFirst().getDataStream();
+        assertThat(dataStream.getName(), equalTo(DATA_STREAM_NAME));
+        assertThat(dataStream.getIndices().size(), equalTo(1));
+        String backingIndex = dataStream.getIndices().getFirst().getName();
+        assertThat(backingIndex, backingIndexEqualTo(DATA_STREAM_NAME, 1));
+
+        Index index = resolveIndex(backingIndex);
+        int shardId = 0;
+        for (String node : internalCluster().getNodeNames()) {
+            var indicesService = internalCluster().getInstance(IndicesService.class, node);
+            IndexService indexService = indicesService.indexService(index);
+            if (indexService != null) {
+                IndexShard shard = indexService.getShardOrNull(shardId);
+                if (shard != null && shard.isActive() && shard.routingEntry().primary()) {
+                    return node;
+                }
+            }
+        }
+        throw new AssertionError("IndexShard instance not found for shard " + new ShardId(index, shardId));
+    }
+}
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/action/ReindexDataStreamTransportActionIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/action/ReindexDataStreamTransportActionIT.java
new file mode 100644
index 0000000000000..fdc96892d4b27
--- /dev/null
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/action/ReindexDataStreamTransportActionIT.java
@@ -0,0 +1,152 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.datastreams.action;
+
+import org.elasticsearch.ResourceNotFoundException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionType;
+import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.admin.indices.rollover.RolloverRequestBuilder;
+import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
+import org.elasticsearch.action.datastreams.CreateDataStreamAction;
+import org.elasticsearch.action.datastreams.ReindexDataStreamAction;
+import org.elasticsearch.action.datastreams.ReindexDataStreamAction.ReindexDataStreamRequest;
+import org.elasticsearch.action.datastreams.ReindexDataStreamAction.ReindexDataStreamResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
+import org.elasticsearch.cluster.metadata.Template;
+import org.elasticsearch.datastreams.DataStreamsPlugin;
+import org.elasticsearch.datastreams.task.ReindexDataStreamTask;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.tasks.CancellableTask;
+import org.elasticsearch.tasks.TaskManager;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xcontent.XContentType;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+public class ReindexDataStreamTransportActionIT extends ESIntegTestCase {
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return List.of(DataStreamsPlugin.class);
+    }
+
+    public void testNonExistentDataStream() {
+        String nonExistentDataStreamName = randomAlphaOfLength(50);
+        ReindexDataStreamRequest reindexDataStreamRequest = new ReindexDataStreamRequest(nonExistentDataStreamName);
+        assertThrows(
+            ResourceNotFoundException.class,
+            () -> client().execute(new ActionType<ReindexDataStreamResponse>(ReindexDataStreamAction.NAME), reindexDataStreamRequest)
+                .actionGet()
+        );
+    }
+
+    public void testAlreadyUpToDateDataStream() throws Exception {
+        String dataStreamName = randomAlphaOfLength(50).toLowerCase(Locale.ROOT);
+        ReindexDataStreamRequest reindexDataStreamRequest = new ReindexDataStreamRequest(dataStreamName);
+        createDataStream(dataStreamName);
+        ReindexDataStreamResponse response = client().execute(
+            new ActionType<ReindexDataStreamResponse>(ReindexDataStreamAction.NAME),
+            reindexDataStreamRequest
+        ).actionGet();
+        String persistentTaskId = response.getTaskId();
+        assertThat(persistentTaskId, equalTo("reindex-data-stream-" + dataStreamName));
+        AtomicReference<ReindexDataStreamTask> runningTask = new AtomicReference<>();
+        for (TransportService transportService : internalCluster().getInstances(TransportService.class)) {
+            TaskManager taskManager = transportService.getTaskManager();
+            Map<Long, CancellableTask> tasksMap = taskManager.getCancellableTasks();
+            Optional<Map.Entry<Long, CancellableTask>> optionalTask = taskManager.getCancellableTasks()
+                .entrySet()
+                .stream()
+                .filter(entry -> entry.getValue().getType().equals("persistent"))
+                .filter(
+                    entry -> entry.getValue() instanceof ReindexDataStreamTask
+                        && persistentTaskId.equals((((ReindexDataStreamTask) entry.getValue()).getPersistentTaskId()))
+                )
+                .findAny();
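+            // The persistent task may be allocated to any node in the cluster, so each
+            // node's TaskManager is inspected to find the running ReindexDataStreamTask.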
+            optionalTask.ifPresent(
+                longCancellableTaskEntry -> runningTask.compareAndSet(null, (ReindexDataStreamTask) longCancellableTaskEntry.getValue())
+            );
+        }
+        ReindexDataStreamTask task = runningTask.get();
+        assertNotNull(task);
+        assertThat(task.getStatus().complete(), equalTo(true));
+        assertNull(task.getStatus().exception());
+        assertThat(task.getStatus().pending(), equalTo(0));
+        assertThat(task.getStatus().inProgress(), equalTo(0));
+        assertThat(task.getStatus().errors().size(), equalTo(0));
+    }
+
+    private void createDataStream(String dataStreamName) {
+        final TransportPutComposableIndexTemplateAction.Request putComposableTemplateRequest =
+            new TransportPutComposableIndexTemplateAction.Request("my-template");
+        putComposableTemplateRequest.indexTemplate(
+            ComposableIndexTemplate.builder()
+                .indexPatterns(List.of(dataStreamName))
+                .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false))
+                .template(Template.builder().build())
+                .build()
+        );
+        final AcknowledgedResponse putComposableTemplateResponse = safeGet(
+            client().execute(TransportPutComposableIndexTemplateAction.TYPE, putComposableTemplateRequest)
+        );
+        assertThat(putComposableTemplateResponse.isAcknowledged(), is(true));
+
+        final CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(
+            TEST_REQUEST_TIMEOUT,
+            TEST_REQUEST_TIMEOUT,
+            dataStreamName
+        );
+        final AcknowledgedResponse createDataStreamResponse = safeGet(
+            client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest)
+        );
+        assertThat(createDataStreamResponse.isAcknowledged(), is(true));
+        indexDocs(dataStreamName);
+        safeGet(new RolloverRequestBuilder(client()).setRolloverTarget(dataStreamName).lazy(false).execute());
+        indexDocs(dataStreamName);
+        safeGet(new RolloverRequestBuilder(client()).setRolloverTarget(dataStreamName).lazy(false).execute());
+    }
+
+    private void indexDocs(String dataStreamName) {
+        int docs = randomIntBetween(5, 10);
+        CountDownLatch countDownLatch = new CountDownLatch(docs);
+        for (int i = 0; i < docs; i++) {
+            var indexRequest = new IndexRequest(dataStreamName).opType(DocWriteRequest.OpType.CREATE);
+            final String doc = "{ \"@timestamp\": \"2099-05-06T16:21:15.000Z\", \"message\": \"something cool happened\" }";
+            indexRequest.source(doc, XContentType.JSON);
+            client().index(indexRequest, new ActionListener<>() {
+                @Override
+                public void onResponse(DocWriteResponse docWriteResponse) {
+                    countDownLatch.countDown();
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    fail("Indexing request should have succeeded eventually, failed with " + e.getMessage());
+                }
+            });
+        }
+        safeAwait(countDownLatch);
+    }
+
+}
diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/AbstractDataStreamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/AbstractDataStreamIT.java
index 01c63be448e62..2dc6fd84fdfe7 100644
--- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/AbstractDataStreamIT.java
+++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/AbstractDataStreamIT.java
@@ -45,6 +45,7 @@ public abstract class AbstractDataStreamIT extends ESRestTestCase {
         // tests such as testIgnoreDynamicBeyondLimit.
         .setting("xpack.apm_data.enabled", "false")
         .setting("xpack.otel_data.registry.enabled", "false")
+        .setting("cluster.logsdb.enabled", "false")
         .build();
 
     protected RestClient client;
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java
index f60a3e5c47a7f..f090186480b76 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java
@@ -9,7 +9,6 @@
 
 package org.elasticsearch.datastreams;
 
-import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.indices.rollover.LazyRolloverAction;
 import org.elasticsearch.action.datastreams.autosharding.DataStreamAutoShardingService;
 import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention;
@@ -17,7 +16,6 @@
 import org.elasticsearch.features.FeatureSpecification;
 import org.elasticsearch.features.NodeFeature;
 
-import java.util.Map;
 import java.util.Set;
 
 /**
@@ -25,14 +23,8 @@
  */
 public class DataStreamFeatures implements FeatureSpecification {
 
-    public static final NodeFeature DATA_STREAM_LIFECYCLE = new NodeFeature("data_stream.lifecycle");
     public static final NodeFeature DATA_STREAM_FAILURE_STORE_TSDB_FIX = new NodeFeature("data_stream.failure_store.tsdb_fix");
 
-    @Override
-    public Map<NodeFeature, Version> getHistoricalFeatures() {
-        return Map.of(DATA_STREAM_LIFECYCLE, Version.V_8_11_0);
-    }
-
     @Override
     public Set<NodeFeature> getFeatures() {
         return Set.of(
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java
index cb7445705537a..2f3b63d27ca35 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java
@@ -19,19 +19,23 @@
 import org.elasticsearch.action.datastreams.MigrateToDataStreamAction;
 import org.elasticsearch.action.datastreams.ModifyDataStreamsAction;
 import org.elasticsearch.action.datastreams.PromoteDataStreamAction;
+import org.elasticsearch.action.datastreams.ReindexDataStreamAction;
 import org.elasticsearch.action.datastreams.lifecycle.ExplainDataStreamLifecycleAction;
 import org.elasticsearch.action.datastreams.lifecycle.GetDataStreamLifecycleAction;
 import org.elasticsearch.action.datastreams.lifecycle.PutDataStreamLifecycleAction;
+import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.client.internal.OriginSettingClient;
 import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.SettingsFilter;
+import org.elasticsearch.common.settings.SettingsModule;
 import org.elasticsearch.core.IOUtils;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.datastreams.action.CreateDataStreamTransportAction;
@@ -40,6 +44,7 @@ import org.elasticsearch.datastreams.action.MigrateToDataStreamTransportAction;
 import org.elasticsearch.datastreams.action.ModifyDataStreamsTransportAction;
 import org.elasticsearch.datastreams.action.PromoteDataStreamTransportAction;
+import org.elasticsearch.datastreams.action.ReindexDataStreamTransportAction;
 import org.elasticsearch.datastreams.action.TransportGetDataStreamsAction;
 import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore;
 import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService;
@@ -73,14 +78,27 @@ import org.elasticsearch.datastreams.rest.RestMigrateToDataStreamAction;
 import org.elasticsearch.datastreams.rest.RestModifyDataStreamsAction;
 import org.elasticsearch.datastreams.rest.RestPromoteDataStreamAction;
+import org.elasticsearch.datastreams.task.ReindexDataStreamPersistentTaskExecutor;
+import org.elasticsearch.datastreams.task.ReindexDataStreamPersistentTaskState;
+import org.elasticsearch.datastreams.task.ReindexDataStreamStatus;
+import org.elasticsearch.datastreams.task.ReindexDataStreamTask;
+import org.elasticsearch.datastreams.task.ReindexDataStreamTaskParams;
 import org.elasticsearch.features.NodeFeature;
 import org.elasticsearch.health.HealthIndicatorService;
 import org.elasticsearch.index.IndexSettingProvider;
+import org.elasticsearch.persistent.PersistentTaskParams;
+import org.elasticsearch.persistent.PersistentTaskState;
+import org.elasticsearch.persistent.PersistentTasksExecutor;
 import org.elasticsearch.plugins.ActionPlugin;
 import org.elasticsearch.plugins.HealthPlugin;
+import org.elasticsearch.plugins.PersistentTaskPlugin;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestHandler;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.xcontent.NamedXContentRegistry;
+import org.elasticsearch.xcontent.ParseField;
 
 import java.io.IOException;
 import java.time.Clock;
@@ -93,7 +111,7 @@ import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.DATA_STREAM_LIFECYCLE_ORIGIN;
 
-public class DataStreamsPlugin extends Plugin implements ActionPlugin, HealthPlugin {
+public class DataStreamsPlugin extends Plugin implements ActionPlugin, HealthPlugin, PersistentTaskPlugin {
 
     public static final Setting<TimeValue> TIME_SERIES_POLL_INTERVAL = Setting.timeSetting(
         "time_series.poll_interval",
@@ -244,6 +262,7 @@ public Collection<?> createComponents(PluginServices services) {
             actions.add(new ActionHandler<>(PutDataStreamOptionsAction.INSTANCE, TransportPutDataStreamOptionsAction.class));
             actions.add(new ActionHandler<>(DeleteDataStreamOptionsAction.INSTANCE, TransportDeleteDataStreamOptionsAction.class));
         }
+        actions.add(new ActionHandler<>(ReindexDataStreamAction.INSTANCE, ReindexDataStreamTransportAction.class));
         return actions;
     }
 
@@ -302,4 +321,48 @@ public void close() throws IOException {
     public Collection<HealthIndicatorService> getHealthIndicatorServices() {
         return List.of(dataStreamLifecycleHealthIndicatorService.get());
     }
+
+    @Override
+    public List<NamedXContentRegistry.Entry> getNamedXContent() {
+        return List.of(
+            new NamedXContentRegistry.Entry(
+                PersistentTaskState.class,
+                new ParseField(ReindexDataStreamPersistentTaskState.NAME),
+                ReindexDataStreamPersistentTaskState::fromXContent
+            ),
+            new NamedXContentRegistry.Entry(
+                PersistentTaskParams.class,
+                new ParseField(ReindexDataStreamTaskParams.NAME),
+                ReindexDataStreamTaskParams::fromXContent
+            )
+        );
+    }
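+
+    // The task params and state are registered both as named writeables (for the transport
+    // protocol) and as named XContent (so they can be parsed back out of cluster state).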
+
+    @Override
+    public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
+        return List.of(
+            new NamedWriteableRegistry.Entry(
+                PersistentTaskState.class,
+                ReindexDataStreamPersistentTaskState.NAME,
+                ReindexDataStreamPersistentTaskState::new
+            ),
+            new NamedWriteableRegistry.Entry(
+                PersistentTaskParams.class,
+                ReindexDataStreamTaskParams.NAME,
+                ReindexDataStreamTaskParams::new
+            ),
+            new NamedWriteableRegistry.Entry(Task.Status.class, ReindexDataStreamStatus.NAME, ReindexDataStreamStatus::new)
+        );
+    }
+
+    @Override
+    public List<PersistentTasksExecutor<?>> getPersistentTasksExecutor(
+        ClusterService clusterService,
+        ThreadPool threadPool,
+        Client client,
+        SettingsModule settingsModule,
+        IndexNameExpressionResolver expressionResolver
+    ) {
+        return List.of(new ReindexDataStreamPersistentTaskExecutor(client, clusterService, ReindexDataStreamTask.TASK_NAME, threadPool));
+    }
 }
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/ReindexDataStreamTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/ReindexDataStreamTransportAction.java
new file mode 100644
index 0000000000000..0a86985c6c7b2
--- /dev/null
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/ReindexDataStreamTransportAction.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.datastreams.action;
+
+import org.elasticsearch.ResourceAlreadyExistsException;
+import org.elasticsearch.ResourceNotFoundException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.datastreams.ReindexDataStreamAction;
+import org.elasticsearch.action.datastreams.ReindexDataStreamAction.ReindexDataStreamRequest;
+import org.elasticsearch.action.datastreams.ReindexDataStreamAction.ReindexDataStreamResponse;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.cluster.metadata.DataStream;
+import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.datastreams.task.ReindexDataStreamTask;
+import org.elasticsearch.datastreams.task.ReindexDataStreamTaskParams;
+import org.elasticsearch.injection.guice.Inject;
+import org.elasticsearch.persistent.PersistentTasksService;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/*
+ * This transport action creates a new persistent task for reindexing the source data stream given in the request. On successful creation
+ * of the persistent task, it responds with the persistent task id so that the user can monitor the persistent task.
+ */
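+// A minimal usage sketch ("client", "doSomethingWith" and "handle" are illustrative
+// placeholders): start the reindex and capture the persistent task id from the
+// response in order to monitor the task later:
+//
+//     client.execute(
+//         ReindexDataStreamAction.INSTANCE,
+//         new ReindexDataStreamRequest("my-data-stream"),
+//         ActionListener.wrap(response -> doSomethingWith(response.getTaskId()), exception -> handle(exception))
+//     );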
+public class ReindexDataStreamTransportAction extends HandledTransportAction<ReindexDataStreamRequest, ReindexDataStreamResponse> {
+    private final PersistentTasksService persistentTasksService;
+    private final TransportService transportService;
+    private final ClusterService clusterService;
+
+    @Inject
+    public ReindexDataStreamTransportAction(
+        TransportService transportService,
+        ActionFilters actionFilters,
+        PersistentTasksService persistentTasksService,
+        ClusterService clusterService
+    ) {
+        super(
+            ReindexDataStreamAction.NAME,
+            true,
+            transportService,
+            actionFilters,
+            ReindexDataStreamRequest::new,
+            transportService.getThreadPool().executor(ThreadPool.Names.GENERIC)
+        );
+        this.transportService = transportService;
+        this.persistentTasksService = persistentTasksService;
+        this.clusterService = clusterService;
+    }
+
+    @Override
+    protected void doExecute(Task task, ReindexDataStreamRequest request, ActionListener<ReindexDataStreamResponse> listener) {
+        String sourceDataStreamName = request.getSourceDataStream();
+        Metadata metadata = clusterService.state().metadata();
+        DataStream dataStream = metadata.dataStreams().get(sourceDataStreamName);
+        if (dataStream == null) {
+            listener.onFailure(new ResourceNotFoundException("Data stream named [{}] does not exist", sourceDataStreamName));
+            return;
+        }
+        int totalIndices = dataStream.getIndices().size();
+        int totalIndicesToBeUpgraded = (int) dataStream.getIndices()
+            .stream()
+            .filter(index -> metadata.index(index).getCreationVersion().isLegacyIndexVersion())
+            .count();
+        ReindexDataStreamTaskParams params = new ReindexDataStreamTaskParams(
+            sourceDataStreamName,
+            transportService.getThreadPool().absoluteTimeInMillis(),
+            totalIndices,
+            totalIndicesToBeUpgraded
+        );
+        String persistentTaskId = getPersistentTaskId(sourceDataStreamName);
+        persistentTasksService.sendStartRequest(
+            persistentTaskId,
+            ReindexDataStreamTask.TASK_NAME,
+            params,
+            null,
+            ActionListener.wrap(startedTask -> listener.onResponse(new ReindexDataStreamResponse(persistentTaskId)), listener::onFailure)
+        );
+    }
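+
+    // The persistent task id is derived from the data stream name, so at most one reindex
+    // task can exist per data stream; starting a second one for the same stream is expected
+    // to fail with a ResourceAlreadyExistsException.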
+    private String getPersistentTaskId(String dataStreamName) throws ResourceAlreadyExistsException {
+        return "reindex-data-stream-" + dataStreamName;
+    }
+}
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskExecutor.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskExecutor.java
new file mode 100644
index 0000000000000..f10d2e7b356fb
--- /dev/null
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskExecutor.java
@@ -0,0 +1,121 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.datastreams.task;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.datastreams.GetDataStreamAction;
+import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.persistent.AllocatedPersistentTask;
+import org.elasticsearch.persistent.PersistentTaskState;
+import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
+import org.elasticsearch.persistent.PersistentTasksExecutor;
+import org.elasticsearch.tasks.TaskId;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.List;
+import java.util.Map;
+
+public class ReindexDataStreamPersistentTaskExecutor extends PersistentTasksExecutor<ReindexDataStreamTaskParams> {
+    private static final TimeValue TASK_KEEP_ALIVE_TIME = TimeValue.timeValueDays(1);
+    private final Client client;
+    private final ClusterService clusterService;
+    private final ThreadPool threadPool;
+
+    public ReindexDataStreamPersistentTaskExecutor(Client client, ClusterService clusterService, String taskName, ThreadPool threadPool) {
+        super(taskName, threadPool.generic());
+        this.client = client;
+        this.clusterService = clusterService;
+        this.threadPool = threadPool;
+    }
+
+    @Override
+    protected ReindexDataStreamTask createTask(
+        long id,
+        String type,
+        String action,
+        TaskId parentTaskId,
+        PersistentTasksCustomMetadata.PersistentTask<ReindexDataStreamTaskParams> taskInProgress,
+        Map<String, String> headers
+    ) {
+        ReindexDataStreamTaskParams params = taskInProgress.getParams();
+        return new ReindexDataStreamTask(
+            params.startTime(),
+            params.totalIndices(),
+            params.totalIndicesToBeUpgraded(),
+            threadPool,
+            id,
+            type,
+            action,
+            "id=" + taskInProgress.getId(),
+            parentTaskId,
+            headers
+        );
+    }
+
+    @Override
+    protected void nodeOperation(AllocatedPersistentTask task, ReindexDataStreamTaskParams params, PersistentTaskState state) {
+        String sourceDataStream = params.getSourceDataStream();
+        GetDataStreamAction.Request request = new GetDataStreamAction.Request(TimeValue.MAX_VALUE, new String[] { sourceDataStream });
+        assert task instanceof ReindexDataStreamTask;
+        final ReindexDataStreamTask reindexDataStreamTask = (ReindexDataStreamTask) task;
+        client.execute(GetDataStreamAction.INSTANCE, request, ActionListener.wrap(response -> {
+            List<GetDataStreamAction.Response.DataStreamInfo> dataStreamInfos = response.getDataStreams();
+            if (dataStreamInfos.size() == 1) {
+                List<Index> indices = dataStreamInfos.getFirst().getDataStream().getIndices();
+                List<Index> indicesToBeReindexed = indices.stream()
+                    .filter(index -> clusterService.state().getMetadata().index(index).getCreationVersion().isLegacyIndexVersion())
+                    .toList();
+                reindexDataStreamTask.setPendingIndices(indicesToBeReindexed.stream().map(Index::getName).toList());
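+                // Only backing indices created on a legacy index version are queued for
+                // reindexing; indices already on a current version are left in place.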
+                for (Index index : indicesToBeReindexed) {
+                    // TODO This is just a placeholder. This is where the real data stream reindex logic will go
+                }
+
+                completeSuccessfulPersistentTask(reindexDataStreamTask);
+            } else {
+                completeFailedPersistentTask(reindexDataStreamTask, new ElasticsearchException("data stream does not exist"));
+            }
+        }, reindexDataStreamTask::markAsFailed));
+    }
+
+    private void completeSuccessfulPersistentTask(ReindexDataStreamTask persistentTask) {
+        persistentTask.reindexSucceeded();
+        threadPool.schedule(persistentTask::markAsCompleted, getTimeToLive(persistentTask), threadPool.generic());
+    }
+
+    private void completeFailedPersistentTask(ReindexDataStreamTask persistentTask, Exception e) {
+        persistentTask.reindexFailed(e);
+        threadPool.schedule(() -> persistentTask.markAsFailed(e), getTimeToLive(persistentTask), threadPool.generic());
+    }
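+
+    // A finished task is kept alive for TASK_KEEP_ALIVE_TIME (one day) so that its status
+    // remains queryable; the completion time is stored in the persistent task state, so the
+    // remaining time-to-live can be recomputed if this code runs again for the same task.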
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskState.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskState.java
new file mode 100644
index 0000000000000..d6f32a3d34a7a
--- /dev/null
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskState.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.datastreams.task;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.persistent.PersistentTaskState;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+
+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
+
+public record ReindexDataStreamPersistentTaskState(long completionTime) implements Task.Status, PersistentTaskState {
+    public static final String NAME = ReindexDataStreamTask.TASK_NAME;
+    private static final String COMPLETION_TIME_FIELD = "completion_time";
+    private static final ConstructingObjectParser<ReindexDataStreamPersistentTaskState, Void> PARSER = new ConstructingObjectParser<>(
+        NAME,
+        true,
+        args -> new ReindexDataStreamPersistentTaskState((long) args[0])
+    );
+    static {
+        PARSER.declareLong(constructorArg(), new ParseField(COMPLETION_TIME_FIELD));
+    }
+
+    public ReindexDataStreamPersistentTaskState(StreamInput in) throws IOException {
+        this(in.readLong());
+    }
+
+    @Override
+    public String getWriteableName() {
+        return NAME;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeLong(completionTime);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(COMPLETION_TIME_FIELD, completionTime);
+        builder.endObject();
+        return builder;
+    }
+
+    public static ReindexDataStreamPersistentTaskState fromXContent(XContentParser parser) {
+        return PARSER.apply(parser, null);
+    }
+
+}
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamStatus.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamStatus.java
new file mode 100644
index 0000000000000..10dfded853a13
--- /dev/null
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamStatus.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.datastreams.task;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.core.Tuple;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+
+public record ReindexDataStreamStatus(
+    long persistentTaskStartTime,
+    int totalIndices,
+    int totalIndicesToBeUpgraded,
+    boolean complete,
+    Exception exception,
+    int inProgress,
+    int pending,
+    List<Tuple<String, Exception>> errors
+) implements Task.Status {
+    public ReindexDataStreamStatus {
+        Objects.requireNonNull(errors);
+    }
+
+    public static final String NAME = "ReindexDataStreamStatus";
+
+    public ReindexDataStreamStatus(StreamInput in) throws IOException {
+        this(
+            in.readLong(),
+            in.readInt(),
+            in.readInt(),
+            in.readBoolean(),
+            in.readException(),
+            in.readInt(),
+            in.readInt(),
+            in.readCollectionAsList(in1 -> Tuple.tuple(in1.readString(), in1.readException()))
+        );
+    }
+
+    @Override
+    public String getWriteableName() {
+        return NAME;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeLong(persistentTaskStartTime);
+        out.writeInt(totalIndices);
+        out.writeInt(totalIndicesToBeUpgraded);
+        out.writeBoolean(complete);
+        out.writeException(exception);
+        out.writeInt(inProgress);
+        out.writeInt(pending);
+        out.writeCollection(errors, (out1, tuple) -> {
+            out1.writeString(tuple.v1());
+            out1.writeException(tuple.v2());
+        });
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field("start_time", persistentTaskStartTime);
+        builder.field("complete", complete);
+        builder.field("total_indices", totalIndices);
+        builder.field("total_indices_requiring_upgrade", totalIndicesToBeUpgraded);
+        builder.field("successes", totalIndicesToBeUpgraded - (inProgress + pending + errors.size()));
+        builder.field("in_progress", inProgress);
+        builder.field("pending", pending);
+        builder.startArray("errors");
+        for (Tuple<String, Exception> error : errors) {
+            builder.startObject();
+            builder.field("index", error.v1());
+            builder.field("message", error.v2().getMessage());
+            builder.endObject();
+        }
+        builder.endArray();
+        if (exception != null) {
+            builder.field("exception", exception.getMessage());
+        }
+        builder.endObject();
+        return builder;
+    }
+}
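Note that the "successes" value emitted by toXContent above is derived rather than stored: it is totalIndicesToBeUpgraded minus everything still in flight, still pending, or failed. With the figures used by the status serialization test later in this change (100 indices requiring upgrade, 12 in progress, 8 pending, 2 errors) that works out to 78. A tiny self-contained illustration of the same arithmetic:

    class SuccessesMath {
        public static void main(String[] args) {
            int totalIndicesToBeUpgraded = 100;
            int inProgress = 12;
            int pending = 8;
            int errorCount = 2;
            // Mirrors: totalIndicesToBeUpgraded - (inProgress + pending + errors.size())
            int successes = totalIndicesToBeUpgraded - (inProgress + pending + errorCount);
            System.out.println(successes); // 78
        }
    }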
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamTask.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamTask.java
new file mode 100644
index 0000000000000..2ae244679659f
--- /dev/null
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamTask.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.datastreams.task;
+
+import org.elasticsearch.core.Tuple;
+import org.elasticsearch.persistent.AllocatedPersistentTask;
+import org.elasticsearch.tasks.TaskId;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public class ReindexDataStreamTask extends AllocatedPersistentTask {
+    public static final String TASK_NAME = "reindex-data-stream";
+    private final long persistentTaskStartTime;
+    private final int totalIndices;
+    private final int totalIndicesToBeUpgraded;
+    private final ThreadPool threadPool;
+    private boolean complete = false;
+    private Exception exception;
+    private List<String> inProgress = new ArrayList<>();
+    private List<String> pending = List.of();
+    private List<Tuple<String, Exception>> errors = new ArrayList<>();
+
+    public ReindexDataStreamTask(
+        long persistentTaskStartTime,
+        int totalIndices,
+        int totalIndicesToBeUpgraded,
+        ThreadPool threadPool,
+        long id,
+        String type,
+        String action,
+        String description,
+        TaskId parentTask,
+        Map<String, String> headers
+    ) {
+        super(id, type, action, description, parentTask, headers);
+        this.persistentTaskStartTime = persistentTaskStartTime;
+        this.totalIndices = totalIndices;
+        this.totalIndicesToBeUpgraded = totalIndicesToBeUpgraded;
+        this.threadPool = threadPool;
+    }
+
+    @Override
+    public ReindexDataStreamStatus getStatus() {
+        return new ReindexDataStreamStatus(
+            persistentTaskStartTime,
+            totalIndices,
+            totalIndicesToBeUpgraded,
+            complete,
+            exception,
+            inProgress.size(),
+            pending.size(),
+            errors
+        );
+    }
+
+    public void reindexSucceeded() {
+        this.complete = true;
+    }
+
+    public void reindexFailed(Exception e) {
+        this.complete = true;
+        this.exception = e;
+    }
+
+    public void setInProgressIndices(List<String> inProgressIndices) {
+        this.inProgress = inProgressIndices;
+    }
+
+    public void setPendingIndices(List<String> pendingIndices) {
+        this.pending = pendingIndices;
+    }
+
+    public void addErrorIndex(String index, Exception error) {
+        this.errors.add(Tuple.tuple(index, error));
+    }
+}
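Completion here is two-phase: reindexSucceeded()/reindexFailed() flip the status to complete immediately, while the executor's scheduled call to markAsCompleted()/markAsFailed() removes the task from the task list only after the keep-alive window. A hedged, plain-JDK sketch of that "finish now, unregister later" shape (illustrative names, not the Elasticsearch task framework):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    class TwoPhaseCompletion {
        private volatile boolean complete = false;

        void finishNowUnregisterLater(ScheduledExecutorService scheduler, Runnable unregister, long ttlMillis) {
            complete = true; // the status immediately reports complete=true
            scheduler.schedule(unregister, ttlMillis, TimeUnit.MILLISECONDS); // stays queryable until the TTL elapses
        }

        public static void main(String[] args) throws Exception {
            ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
            TwoPhaseCompletion task = new TwoPhaseCompletion();
            task.finishNowUnregisterLater(scheduler, () -> System.out.println("unregistered"), 100);
            Thread.sleep(200);
            scheduler.shutdown();
        }
    }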
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamTaskParams.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamTaskParams.java
new file mode 100644
index 0000000000000..5efbc6b672216
--- /dev/null
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamTaskParams.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.datastreams.task;
+
+import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.persistent.PersistentTaskParams;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+
+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
+
+public record ReindexDataStreamTaskParams(String sourceDataStream, long startTime, int totalIndices, int totalIndicesToBeUpgraded)
+    implements
+        PersistentTaskParams {
+
+    public static final String NAME = ReindexDataStreamTask.TASK_NAME;
+    private static final String SOURCE_DATA_STREAM_FIELD = "source_data_stream";
+    private static final String START_TIME_FIELD = "start_time";
+    private static final String TOTAL_INDICES_FIELD = "total_indices";
+    private static final String TOTAL_INDICES_TO_BE_UPGRADED_FIELD = "total_indices_to_be_upgraded";
+    private static final ConstructingObjectParser<ReindexDataStreamTaskParams, Void> PARSER = new ConstructingObjectParser<>(
+        NAME,
+        true,
+        args -> new ReindexDataStreamTaskParams((String) args[0], (long) args[1], (int) args[2], (int) args[3])
+    );
+    static {
+        PARSER.declareString(constructorArg(), new ParseField(SOURCE_DATA_STREAM_FIELD));
+        PARSER.declareLong(constructorArg(), new ParseField(START_TIME_FIELD));
+        PARSER.declareInt(constructorArg(), new ParseField(TOTAL_INDICES_FIELD));
+        PARSER.declareInt(constructorArg(), new ParseField(TOTAL_INDICES_TO_BE_UPGRADED_FIELD));
+    }
+
+    public ReindexDataStreamTaskParams(StreamInput in) throws IOException {
+        this(in.readString(), in.readLong(), in.readInt(), in.readInt());
+    }
+
+    @Override
+    public String getWriteableName() {
+        return NAME;
+    }
+
+    @Override
+    public TransportVersion getMinimalSupportedVersion() {
+        return TransportVersions.REINDEX_DATA_STREAMS;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeString(sourceDataStream);
+        out.writeLong(startTime);
+        out.writeInt(totalIndices);
+        out.writeInt(totalIndicesToBeUpgraded);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        return builder.startObject()
+            .field(SOURCE_DATA_STREAM_FIELD, sourceDataStream)
+            .field(START_TIME_FIELD, startTime)
+            .field(TOTAL_INDICES_FIELD, totalIndices)
+            .field(TOTAL_INDICES_TO_BE_UPGRADED_FIELD, totalIndicesToBeUpgraded)
+            .endObject();
+    }
+
+    public String getSourceDataStream() {
+        return sourceDataStream;
+    }
+
+    public static ReindexDataStreamTaskParams fromXContent(XContentParser parser) {
+        return PARSER.apply(parser, null);
+    }
+}
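The params and state classes both reuse ReindexDataStreamTask.TASK_NAME ("reindex-data-stream") as their writeable name, which is how the persistent-tasks framework pairs serialized params and state with this executor. The registration site is not part of this excerpt; in a plugin it would typically look something like the sketch below (the surrounding method is an assumption, though the Entry constructor is the standard API; a matching NamedXContent registration would normally accompany it):

    // Hypothetical registration sketch; not shown in this diff.
    public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
        return List.of(
            new NamedWriteableRegistry.Entry(PersistentTaskParams.class, ReindexDataStreamTaskParams.NAME, ReindexDataStreamTaskParams::new),
            new NamedWriteableRegistry.Entry(PersistentTaskState.class, ReindexDataStreamPersistentTaskState.NAME, ReindexDataStreamPersistentTaskState::new)
        );
    }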
diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java
index a3995d7462b32..e009db7209eab 100644
--- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java
+++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java
@@ -48,7 +48,8 @@ protected void registerParameters(ParameterChecker checker) throws IOException {
         checker.registerConflictCheck(
             "enabled",
             timestampMapping(true, b -> b.startObject("@timestamp").field("type", "date").endObject()),
-            timestampMapping(false, b -> b.startObject("@timestamp").field("type", "date").endObject())
+            timestampMapping(false, b -> b.startObject("@timestamp").field("type", "date").endObject()),
+            dm -> {}
         );
         checker.registerUpdateCheck(
             timestampMapping(false, b -> b.startObject("@timestamp").field("type", "date").endObject()),
diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskStateTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskStateTests.java
new file mode 100644
index 0000000000000..be11bff131909
--- /dev/null
+++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskStateTests.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.datastreams.task;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractXContentSerializingTestCase;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+
+public class ReindexDataStreamPersistentTaskStateTests extends AbstractXContentSerializingTestCase<
+    ReindexDataStreamPersistentTaskState> {
+    @Override
+    protected ReindexDataStreamPersistentTaskState doParseInstance(XContentParser parser) throws IOException {
+        return ReindexDataStreamPersistentTaskState.fromXContent(parser);
+    }
+
+    @Override
+    protected Writeable.Reader<ReindexDataStreamPersistentTaskState> instanceReader() {
+        return ReindexDataStreamPersistentTaskState::new;
+    }
+
+    @Override
+    protected ReindexDataStreamPersistentTaskState createTestInstance() {
+        return new ReindexDataStreamPersistentTaskState(randomNonNegativeLong());
+    }
+
+    @Override
+    protected ReindexDataStreamPersistentTaskState mutateInstance(ReindexDataStreamPersistentTaskState instance) throws IOException {
+        return new ReindexDataStreamPersistentTaskState(instance.completionTime() + 1);
+    }
+}
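The state test above (like the params test later in this change) relies on the AbstractXContentSerializingTestCase contract: createTestInstance produces a random value, mutateInstance must return a copy that compares unequal, and the framework round-trips both through the wire format and XContent to detect lossy serialization. The contract in miniature (plain Java, illustrative only):

    record State(long completionTime) {}

    class MutateContract {
        // Any change that breaks equals() is sufficient; here the only field gets bumped by one.
        static State mutate(State s) {
            return new State(s.completionTime() + 1);
        }

        public static void main(String[] args) {
            State original = new State(1234L);
            System.out.println(original.equals(mutate(original))); // false, as the framework requires
        }
    }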
diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/task/ReindexDataStreamStatusTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/task/ReindexDataStreamStatusTests.java
new file mode 100644
index 0000000000000..8f0fabc2ce7ee
--- /dev/null
+++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/task/ReindexDataStreamStatusTests.java
@@ -0,0 +1,157 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.datastreams.task;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.core.Tuple;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentParser;
+import org.elasticsearch.xcontent.json.JsonXContent;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static java.util.Map.entry;
+import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS;
+import static org.hamcrest.Matchers.equalTo;
+
+public class ReindexDataStreamStatusTests extends AbstractWireSerializingTestCase<ReindexDataStreamStatus> {
+
+    @Override
+    protected Writeable.Reader<ReindexDataStreamStatus> instanceReader() {
+        return ReindexDataStreamStatus::new;
+    }
+
+    @Override
+    protected ReindexDataStreamStatus createTestInstance() {
+        return new ReindexDataStreamStatus(
+            randomLong(),
+            randomNonNegativeInt(),
+            randomNonNegativeInt(),
+            randomBoolean(),
+            nullableTestException(),
+            randomNonNegativeInt(),
+            randomNonNegativeInt(),
+            randomErrorList()
+        );
+    }
+
+    private Exception nullableTestException() {
+        if (randomBoolean()) {
+            return testException();
+        }
+        return null;
+    }
+
+    private Exception testException() {
+        /*
+         * Unfortunately ElasticsearchException doesn't have an equals and just falls back to Object::equals. So we can't test for equality
+         * when we're using an exception. So always just use null.
+         */
+        return null;
+    }
+
+    private List<String> randomList() {
+        return randomList(0);
+    }
+
+    private List<String> randomList(int minSize) {
+        return randomList(minSize, Math.max(minSize, 100), () -> randomAlphaOfLength(50));
+    }
+
+    private List<Tuple<String, Exception>> randomErrorList() {
+        return randomErrorList(0);
+    }
+
+    private List<Tuple<String, Exception>> randomErrorList(int minSize) {
+        return randomList(minSize, Math.max(minSize, 100), () -> Tuple.tuple(randomAlphaOfLength(30), testException()));
+    }
+
+    @Override
+    protected ReindexDataStreamStatus mutateInstance(ReindexDataStreamStatus instance) throws IOException {
+        long startTime = instance.persistentTaskStartTime();
+        int totalIndices = instance.totalIndices();
+        int totalIndicesToBeUpgraded = instance.totalIndicesToBeUpgraded();
+        boolean complete = instance.complete();
+        Exception exception = instance.exception();
+        int inProgress = instance.inProgress();
+        int pending = instance.pending();
+        List<Tuple<String, Exception>> errors = instance.errors();
+        switch (randomIntBetween(0, 6)) {
+            case 0 -> startTime = randomLong();
+            case 1 -> totalIndices = totalIndices + 1;
+            case 2 -> totalIndicesToBeUpgraded = totalIndicesToBeUpgraded + 1;
+            case 3 -> complete = complete == false;
+            case 4 -> inProgress = inProgress + 1;
+            case 5 -> pending = pending + 1;
+            case 6 -> errors = randomErrorList(errors.size() + 1);
+            default -> throw new UnsupportedOperationException();
+        }
+        return new ReindexDataStreamStatus(
+            startTime,
+            totalIndices,
+            totalIndicesToBeUpgraded,
+            complete,
+            exception,
+            inProgress,
+            pending,
+            errors
+        );
+    }
+
+    public void testToXContent() throws IOException {
+        ReindexDataStreamStatus status = new ReindexDataStreamStatus(
+            1234L,
+            200,
+            100,
+            true,
+            new ElasticsearchException("the whole task failed"),
+            12,
+            8,
+            List.of(
+                Tuple.tuple("index7", new ElasticsearchException("index7 failed")),
+                Tuple.tuple("index8", new ElasticsearchException("index8 " + "failed"))
+            )
+        );
+        try (XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent)) {
+            builder.humanReadable(true);
+            status.toXContent(builder, EMPTY_PARAMS);
+            try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) {
+                Map<String, Object> parserMap = parser.map();
+                assertThat(
+                    parserMap,
+                    equalTo(
+                        Map.ofEntries(
+                            entry("start_time", 1234),
+                            entry("total_indices", 200),
+                            entry("total_indices_requiring_upgrade", 100),
+                            entry("complete", true),
+                            entry("exception", "the whole task failed"),
+                            entry("successes", 78),
+                            entry("in_progress", 12),
+                            entry("pending", 8),
+                            entry(
+                                "errors",
+                                List.of(
+                                    Map.of("index", "index7", "message", "index7 failed"),
+                                    Map.of("index", "index8", "message", "index8 failed")
+                                )
+                            )
+                        )
+                    )
+                );
+            }
+        }
+    }
+}
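The status test above extends AbstractWireSerializingTestCase, so only transport serialization is round-tripped; the round-trip amounts to roughly the following sketch (simplified, assuming an existing ReindexDataStreamStatus named original and Elasticsearch's stream test utilities on the classpath):

    // Rough shape of the wire round-trip the test framework performs.
    BytesStreamOutput out = new BytesStreamOutput();
    original.writeTo(out);
    try (StreamInput in = out.bytes().streamInput()) {
        ReindexDataStreamStatus copy = new ReindexDataStreamStatus(in);
        // equals() only holds when exception fields are null, which is why testException() above returns null.
        assert copy.equals(original);
    }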
diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/task/ReindexDataStreamTaskParamsTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/task/ReindexDataStreamTaskParamsTests.java
new file mode 100644
index 0000000000000..55098bf4a68d5
--- /dev/null
+++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/task/ReindexDataStreamTaskParamsTests.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.datastreams.task;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.test.AbstractXContentSerializingTestCase;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentParser;
+import org.elasticsearch.xcontent.json.JsonXContent;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS;
+import static org.hamcrest.Matchers.equalTo;
+
+public class ReindexDataStreamTaskParamsTests extends AbstractXContentSerializingTestCase<ReindexDataStreamTaskParams> {
+
+    @Override
+    protected Writeable.Reader<ReindexDataStreamTaskParams> instanceReader() {
+        return ReindexDataStreamTaskParams::new;
+    }
+
+    @Override
+    protected ReindexDataStreamTaskParams createTestInstance() {
+        return new ReindexDataStreamTaskParams(randomAlphaOfLength(50), randomLong(), randomNonNegativeInt(), randomNonNegativeInt());
+    }
+
+    @Override
+    protected ReindexDataStreamTaskParams mutateInstance(ReindexDataStreamTaskParams instance) {
+        String sourceDataStream = instance.sourceDataStream();
+        long startTime = instance.startTime();
+        int totalIndices = instance.totalIndices();
+        int totalIndicesToBeUpgraded = instance.totalIndicesToBeUpgraded();
+        switch (randomIntBetween(0, 3)) {
+            case 0 -> sourceDataStream = randomAlphaOfLength(50);
+            case 1 -> startTime = randomLong();
+            case 2 -> totalIndices = totalIndices + 1;
+            case 3 -> totalIndicesToBeUpgraded = totalIndicesToBeUpgraded + 1;
+            default -> throw new UnsupportedOperationException();
+        }
+        return new ReindexDataStreamTaskParams(sourceDataStream, startTime, totalIndices, totalIndicesToBeUpgraded);
+    }
+
+    @Override
+    protected ReindexDataStreamTaskParams doParseInstance(XContentParser parser) {
+        return ReindexDataStreamTaskParams.fromXContent(parser);
+    }
+
+    public void testToXContent() throws IOException {
+        ReindexDataStreamTaskParams params = createTestInstance();
+        try (XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent)) {
+            builder.humanReadable(true);
+            params.toXContent(builder, EMPTY_PARAMS);
+            try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) {
+                Map<String, Object> parserMap = parser.map();
+                assertThat(parserMap.get("source_data_stream"), equalTo(params.sourceDataStream()));
+                assertThat(((Number) parserMap.get("start_time")).longValue(), equalTo(params.startTime()));
+            }
+        }
+    }
+}
diff --git a/modules/ingest-attachment/build.gradle b/modules/ingest-attachment/build.gradle
index 92e843fa31a63..8fe2b82fe21fb 100644
--- a/modules/ingest-attachment/build.gradle
+++ b/modules/ingest-attachment/build.gradle
@@ -1,5 +1,3 @@
-import org.elasticsearch.gradle.internal.info.BuildParams
-
 /*
  * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
  * or more contributor license agreements. Licensed under the "Elastic License
@@ -143,7 +141,7 @@ tasks.named("thirdPartyAudit").configure {
   ignoreMissingClasses()
 }
 
-if (BuildParams.inFipsJvm) {
+if (buildParams.inFipsJvm) {
   tasks.named("test").configure { enabled = false }
   tasks.named("yamlRestTest").configure { enabled = false };
   tasks.named("yamlRestCompatTest").configure { enabled = false };
diff --git a/modules/ingest-geoip/qa/full-cluster-restart/build.gradle b/modules/ingest-geoip/qa/full-cluster-restart/build.gradle
index b51fa497c8492..29cc6d7184bf2 100644
--- a/modules/ingest-geoip/qa/full-cluster-restart/build.gradle
+++ b/modules/ingest-geoip/qa/full-cluster-restart/build.gradle
@@ -9,7 +9,6 @@
 
 import org.elasticsearch.gradle.Version
 import org.elasticsearch.gradle.VersionProperties
-import org.elasticsearch.gradle.internal.info.BuildParams
 import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask
 
 apply plugin: 'elasticsearch.internal-java-rest-test'
@@ -24,7 +23,7 @@ dependencies {
 
 // once we are ready to test migrations from 8.x to 9.x, we can set the compatible version to 8.0.0
 // see https://github.com/elastic/elasticsearch/pull/93666
-BuildParams.bwcVersions.withWireCompatible(v -> v.before("9.0.0")) { bwcVersion, baseName ->
+buildParams.bwcVersions.withWireCompatible(v -> v.before("9.0.0")) { bwcVersion, baseName ->
   tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) {
     usesBwcDistribution(bwcVersion)
     systemProperty("tests.old_cluster_version", bwcVersion)
diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java
index f8c8d2bd359f3..dd177fed5732a 100644
--- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java
+++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java
@@ -41,7 +41,6 @@
 import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentType;
 import org.elasticsearch.xcontent.json.JsonXContent;
 import org.junit.After;
 
@@ -67,6 +66,7 @@
 import java.util.zip.GZIPInputStream;
 
 import static org.elasticsearch.ingest.ConfigurationUtils.readStringProperty;
+import static org.elasticsearch.ingest.IngestPipelineTestUtils.jsonSimulatePipelineRequest;
 import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDefaultDatabases;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
@@ -494,7 +494,7 @@ private SimulateDocumentBaseResult simulatePipeline() throws IOException {
             builder.endObject();
             bytes = BytesReference.bytes(builder);
         }
-        SimulatePipelineRequest simulateRequest = new SimulatePipelineRequest(bytes, XContentType.JSON);
+        SimulatePipelineRequest simulateRequest = jsonSimulatePipelineRequest(bytes);
         simulateRequest.setId("_id");
         // Avoid executing on a coordinating only node, because databases are not available there and geoip processor won't do any lookups.
         // (some test seeds repeatedly hit such nodes causing failures)
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java
index 61ca050d91c13..2f96aa3cbc69a 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java
@@ -268,7 +268,7 @@ private static Set<String> pipelinesWithGeoIpProcessor(ClusterState clusterState
         Set<String> ids = new HashSet<>();
         // note: this loop is unrolled rather than streaming-style because it's hot enough to show up in a flamegraph
         for (PipelineConfiguration configuration : configurations) {
-            List<Map<String, Object>> processors = (List<Map<String, Object>>) configuration.getConfigAsMap().get(Pipeline.PROCESSORS_KEY);
+            List<Map<String, Object>> processors = (List<Map<String, Object>>) configuration.getConfig().get(Pipeline.PROCESSORS_KEY);
             if (hasAtLeastOneGeoipProcessor(processors, downloadDatabaseOnPipelineCreation)) {
                 ids.add(configuration.getId());
             }
diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java
index defd20b64762b..cc0b0122e9cce 100644
--- a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java
+++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java
@@ -13,12 +13,10 @@
 import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction;
 import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest;
 import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse;
-import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest;
 import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction;
 import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction;
 import org.elasticsearch.action.bulk.BulkRequestBuilder;
 import org.elasticsearch.action.search.SearchRequest;
-import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.script.ScriptType;
@@ -39,6 +37,7 @@
 import java.util.Map;
 import java.util.concurrent.ExecutionException;
 
+import static org.elasticsearch.action.admin.cluster.storedscripts.StoredScriptIntegTestUtils.newPutStoredScriptTestRequest;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
@@ -467,12 +466,6 @@ public static void assertHitCount(SearchTemplateRequestBuilder requestBuilder, l
     }
 
     private void putJsonStoredScript(String id, String jsonContent) {
-        assertAcked(
-            safeExecute(
-                TransportPutStoredScriptAction.TYPE,
-                new PutStoredScriptRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).id(id)
-                    .content(new BytesArray(jsonContent), XContentType.JSON)
-            )
-        );
+        assertAcked(safeExecute(TransportPutStoredScriptAction.TYPE, newPutStoredScriptTestRequest(id, jsonContent)));
     }
 }
diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt
index a739635e85a9c..875b9a1dac3e8 100644
--- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt
+++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt
@@ -132,6 +132,21 @@ class org.elasticsearch.script.field.SeqNoDocValuesField @dynamic_type {
 class org.elasticsearch.script.field.VersionDocValuesField @dynamic_type {
 }
 
+class org.elasticsearch.script.field.vectors.MultiDenseVector {
+  MultiDenseVector EMPTY
+  float[] getMagnitudes()
+
+  Iterator getVectors()
+  boolean isEmpty()
+  int getDims()
+  int size()
+}
+
+class org.elasticsearch.script.field.vectors.MultiDenseVectorDocValuesField {
+  MultiDenseVector get()
+  MultiDenseVector get(MultiDenseVector)
+}
+
 class org.elasticsearch.script.field.vectors.DenseVector {
   DenseVector EMPTY
   float getMagnitude()
diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt
index e76db7cfb1d26..5a1d8c002aa17 100644
--- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt
+++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt
@@ -50,5 +50,7 @@ static_import {
   double cosineSimilarity(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$CosineSimilarity
   double dotProduct(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$DotProduct
   double hamming(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$Hamming
+  double maxSimDotProduct(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.MultiVectorScoreScriptUtils$MaxSimDotProduct
+  double maxSimInvHamming(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.MultiVectorScoreScriptUtils$MaxSimInvHamming
 }
 
diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt
index 7ab9eb32852b6..b2db0d1006d40 100644
--- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt
+++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.txt
@@ -123,6 +123,11 @@ class org.elasticsearch.index.mapper.vectors.DenseVectorScriptDocValues {
   float getMagnitude()
 }
 
+class org.elasticsearch.index.mapper.vectors.MultiDenseVectorScriptDocValues {
+  Iterator getVectorValues()
+  float[] getMagnitudes()
+}
+
 class org.apache.lucene.util.BytesRef {
   byte[] bytes
   int offset
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/141_multi_dense_vector_max_sim.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/141_multi_dense_vector_max_sim.yml
new file mode 100644
index 0000000000000..77d4b70cdfcae
--- /dev/null
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/141_multi_dense_vector_max_sim.yml
@@ -0,0 +1,206 @@
+setup:
+  - requires:
+      capabilities:
+        - method: POST
+          path: /_search
+          capabilities: [ multi_dense_vector_script_max_sim_with_bugfix ]
+      test_runner_features: capabilities
+      reason: "Support for multi dense vector max-sim functions capability required"
+  - skip:
+      features: headers
+
+  - do:
+      indices.create:
+        index: test-index
+        body:
+          settings:
+            number_of_shards: 1
+          mappings:
+            properties:
+              vector:
+                type: multi_dense_vector
+                dims: 5
+              byte_vector:
+                type: multi_dense_vector
+                dims: 5
+                element_type: byte
+              bit_vector:
+                type: multi_dense_vector
+                dims: 40
+                element_type: bit
+  - do:
+      index:
+        index: test-index
+        id: "1"
+        body:
+          vector: [[230.0, 300.33, -34.8988, 15.555, -200.0], [-0.5, 100.0, -13, 14.8, -156.0]]
+          byte_vector: [[8, 5, -15, 1, -7], [-1, 115, -3, 4, -128]]
+          bit_vector: [[8, 5, -15, 1, -7], [-1, 115, -3, 4, -128]]
+
+  - do:
+      index:
+        index: test-index
+        id: "3"
+        body:
+          vector: [[0.5, 111.3, -13.0, 14.8, -156.0]]
+          byte_vector: [[2, 18, -5, 0, -124]]
+          bit_vector: [[2, 18, -5, 0, -124]]
+
+  - do:
+      indices.refresh: {}
+---
+"Test max-sim dot product scoring":
+  - skip:
+      features: close_to
+
+  - do:
+      headers:
+        Content-Type: application/json
+      search:
+        rest_total_hits_as_int: true
+        body:
+          query:
+            script_score:
+              query: {match_all: {} }
+              script:
+                source: "maxSimDotProduct(params.query_vector, 'vector')"
+                params:
+                  query_vector: [[1, 2, 1, 1, 1]]
+
+  - match: {hits.total: 2}
+
+  - match: {hits.hits.0._id: "1"}
+  - close_to: {hits.hits.0._score: {value: 611.316, error: 0.01}}
+
+  - match: {hits.hits.1._id: "3"}
+  - close_to: {hits.hits.1._score: {value: 68.90001, error: 0.01}}
+
+  - do:
+      headers:
+        Content-Type: application/json
+      search:
+        rest_total_hits_as_int: true
+        body:
+          query:
+            script_score:
+              query: {match_all: {} }
+              script:
+                source: "maxSimDotProduct(params.query_vector, 'byte_vector')"
+                params:
+                  query_vector: [[1, 2, 1, 1, 0]]
+
+  - match: {hits.total: 2}
+
+  - match: {hits.hits.0._id: "1"}
+  - close_to: {hits.hits.0._score: {value: 230, error: 0.01}}
+
+  - match: {hits.hits.1._id: "3"}
+  - close_to: {hits.hits.1._score: {value: 33, error: 0.01}}
+
+  - do:
+      headers:
+        Content-Type: application/json
+      search:
+        rest_total_hits_as_int: true
+        body:
+          query:
+            script_score:
+              query: {match_all: {} }
+              script:
+                source: "maxSimDotProduct(params.query_vector, 'bit_vector')"
+                params:
+                  query_vector: [[1, 2, 1, 1, 0]]
+
+  - match: {hits.total: 2}
+
+  - match: {hits.hits.0._id: "1"}
+  - close_to: {hits.hits.0._score: {value: 3, error: 0.01}}
+
+  - match: {hits.hits.1._id: "3"}
+  - close_to: {hits.hits.1._score: {value: 2, error: 0.01}}
+
+# doing max-sim dot product with a vector where the stored bit vectors are used as masks
+  - do:
+      headers:
+        Content-Type: application/json
+      search:
+        rest_total_hits_as_int: true
+        body:
+          query:
+            script_score:
+              query: {match_all: {} }
+              script:
+                source: "maxSimDotProduct(params.query_vector, 'bit_vector')"
+                params:
+                  query_vector: [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]]
+  - match: {hits.total: 2}
+
+  - match: {hits.hits.0._id: "1"}
+  - close_to: {hits.hits.0._score: {value: 220, error: 0.01}}
+
+  - match: {hits.hits.1._id: "3"}
+  - close_to: {hits.hits.1._score: {value: 147, error: 0.01}}
+---
+"Test max-sim inv hamming scoring":
+  - skip:
+      features: close_to
+
+  # inv hamming doesn't apply to float vectors
+  - do:
+      catch: bad_request
+      headers:
+        Content-Type: application/json
+      search:
+        rest_total_hits_as_int: true
+        body:
+          query:
+            script_score:
+              query: {match_all: {} }
+              script:
+                source: "maxSimInvHamming(params.query_vector, 'vector')"
+                params:
+                  query_vector: [[1, 2, 1, 1, 1]]
+
+  - do:
+      headers:
+        Content-Type: application/json
+      search:
+        rest_total_hits_as_int: true
+        body:
+          query:
+            script_score:
+              query: {match_all: {} }
+              script:
+                source: "maxSimInvHamming(params.query_vector, 'byte_vector')"
+                params:
+                  query_vector: [[1, 2, 1, 1, 1]]
+
+  - match: {hits.total: 2}
+
+  - match: {hits.hits.0._id: "3"}
+  - close_to: {hits.hits.0._score: {value: 0.675, error: 0.01}}
+
+  - match: {hits.hits.1._id: "1"}
+  - close_to: {hits.hits.1._score: {value: 0.65, error: 0.01}}
+
+  - do:
+      headers:
+        Content-Type: application/json
+      search:
+        rest_total_hits_as_int: true
+        body:
+          query:
+            script_score:
+              query: {match_all: {} }
+              script:
+                source: "maxSimInvHamming(params.query_vector, 'bit_vector')"
+                params:
+                  query_vector: [[1, 2, 1, 1, 1]]
+
+  - match: {hits.total: 2}
+
+  - match: {hits.hits.0._id: "3"}
+  - close_to: {hits.hits.0._score: {value: 0.675, error: 0.01}}
+
+  - match: {hits.hits.1._id: "1"}
+  - close_to: {hits.hits.1._score: {value: 0.65, error: 0.01}}
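For readers of the YAML above: maxSimDotProduct follows the "late interaction" (MaxSim) pattern, taking, for each query vector, the best dot product against any of the document's stored vectors, and summing those maxima. A stand-alone sketch of that semantics in plain Java (illustrative names; the real implementation lives in MultiVectorScoreScriptUtils):

    class MaxSimSketch {
        // Sum over query vectors of the maximum dot product against any document vector.
        static double maxSimDotProduct(float[][] queryVectors, float[][] docVectors) {
            double total = 0;
            for (float[] q : queryVectors) {
                double best = Double.NEGATIVE_INFINITY;
                for (float[] d : docVectors) {
                    double dot = 0;
                    for (int i = 0; i < q.length; i++) {
                        dot += q[i] * d[i];
                    }
                    best = Math.max(best, dot);
                }
                total += best;
            }
            return total;
        }

        public static void main(String[] args) {
            float[][] query = { { 1, 2, 1, 1, 1 } };
            float[][] doc = { { 0.5f, 111.3f, -13.0f, 14.8f, -156.0f } };
            // Prints ~68.9, matching the expected score asserted for document "3" in the float test above.
            System.out.println(maxSimDotProduct(query, doc));
        }
    }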
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml
index 2ee38f849e9d4..cdd65ca0eb296 100644
--- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml
@@ -108,7 +108,7 @@ setup:
       capabilities:
         - method: POST
           path: /_search
-          capabilities: [ byte_float_bit_dot_product ]
+          capabilities: [ byte_float_bit_dot_product_with_bugfix ]
       reason: Capability required to run test
   - do:
       catch: bad_request
@@ -399,7 +399,7 @@ setup:
       capabilities:
         - method: POST
           path: /_search
-          capabilities: [ byte_float_bit_dot_product ]
+          capabilities: [ byte_float_bit_dot_product_with_bugfix ]
       test_runner_features: [capabilities, close_to]
       reason: Capability required to run test
   - do:
@@ -419,13 +419,13 @@ setup:
   - match: { hits.total: 3 }
 
   - match: {hits.hits.0._id: "2"}
-  - close_to: {hits.hits.0._score: {value: 35.999, error: 0.01}}
+  - close_to: {hits.hits.0._score: {value: 33.78, error: 0.01}}
 
   - match: {hits.hits.1._id: "3"}
-  - close_to: {hits.hits.1._score:{value: 27.23, error: 0.01}}
+  - close_to: {hits.hits.1._score:{value: 22.579, error: 0.01}}
 
   - match: {hits.hits.2._id: "1"}
-  - close_to: {hits.hits.2._score: {value: 16.57, error: 0.01}}
+  - close_to: {hits.hits.2._score: {value: 11.919, error: 0.01}}
 
   - do:
       headers:
@@ -444,20 +444,20 @@ setup:
   - match: { hits.total: 3 }
 
   - match: {hits.hits.0._id: "2"}
-  - close_to: {hits.hits.0._score: {value: 35.999, error: 0.01}}
+  - close_to: {hits.hits.0._score: {value: 33.78, error: 0.01}}
 
   - match: {hits.hits.1._id: "3"}
-  - close_to: {hits.hits.1._score:{value: 27.23, error: 0.01}}
+  - close_to: {hits.hits.1._score:{value: 22.579, error: 0.01}}
 
   - match: {hits.hits.2._id: "1"}
-  - close_to: {hits.hits.2._score: {value: 16.57, error: 0.01}}
+  - close_to: {hits.hits.2._score: {value: 11.919, error: 0.01}}
 
---
 "Dot product with byte":
   - requires:
       capabilities:
         - method: POST
           path: /_search
-          capabilities: [ byte_float_bit_dot_product ]
+          capabilities: [ byte_float_bit_dot_product_with_bugfix ]
       test_runner_features: capabilities
       reason: Capability required to run test
   - do:
@@ -476,14 +476,14 @@ setup:
 
   - match: { hits.total: 3 }
 
-  - match: {hits.hits.0._id: "1"}
-  - match: {hits.hits.0._score: 248}
+  - match: {hits.hits.0._id: "3"}
+  - match: {hits.hits.0._score: 415}
 
-  - match: {hits.hits.1._id: "2"}
-  - match: {hits.hits.1._score: 136}
+  - match: {hits.hits.1._id: "1"}
+  - match: {hits.hits.1._score: 168}
 
-  - match: {hits.hits.2._id: "3"}
-  - match: {hits.hits.2._score: 20}
+  - match: {hits.hits.2._id: "2"}
+  - match: {hits.hits.2._score: 126}
 
   - do:
       headers:
@@ -501,11 +501,11 @@ setup:
 
   - match: { hits.total: 3 }
 
-  - match: {hits.hits.0._id: "1"}
-  - match: {hits.hits.0._score: 248}
+  - match: {hits.hits.0._id: "3"}
+  - match: {hits.hits.0._score: 415}
 
-  - match: {hits.hits.1._id: "2"}
-  - match: {hits.hits.1._score: 136}
+  - match: {hits.hits.1._id: "1"}
+  - match: {hits.hits.1._score: 168}
 
-  - match: {hits.hits.2._id: "3"}
-  - match: {hits.hits.2._score: 20}
+  - match: {hits.hits.2._id: "2"}
+  - match: {hits.hits.2._score: 126}
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/181_multi_dense_vector_dv_fields_api.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/181_multi_dense_vector_dv_fields_api.yml
new file mode 100644
index 0000000000000..66cb3f3c46fcc
--- /dev/null
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/181_multi_dense_vector_dv_fields_api.yml
@@ -0,0 +1,178 @@
+setup:
+  - requires:
+      capabilities:
+        - method: POST
+          path: /_search
+          capabilities: [ multi_dense_vector_script_access ]
+      test_runner_features: capabilities
+      reason: "Support for multi dense vector field script access capability required"
+  - skip:
+      features: headers
+
+  - do:
+      indices.create:
+        index: test-index
+        body:
+          settings:
+            number_of_shards: 1
+          mappings:
+            properties:
+              vector:
+                type: multi_dense_vector
+                dims: 5
+              byte_vector:
+                type: multi_dense_vector
+                dims: 5
+                element_type: byte
+              bit_vector:
+                type: multi_dense_vector
+                dims: 40
+                element_type: bit
+  - do:
+      index:
+        index: test-index
+        id: "1"
+        body:
+          vector: [[230.0, 300.33, -34.8988, 15.555, -200.0], [-0.5, 100.0, -13, 14.8, -156.0]]
+          byte_vector: [[8, 5, -15, 1, -7], [-1, 115, -3, 4, -128]]
+          bit_vector: [[8, 5, -15, 1, -7], [-1, 115, -3, 4, -128]]
+
+  - do:
+      index:
+        index: test-index
+        id: "3"
+        body:
+          vector: [[0.5, 111.3, -13.0, 14.8, -156.0]]
+          byte_vector: [[2, 18, -5, 0, -124]]
+          bit_vector: [[2, 18, -5, 0, -124]]
+
+  - do:
+      indices.refresh: {}
+---
+"Test vector magnitude equality":
+  - skip:
+      features: close_to
+
+  - do:
+      headers:
+        Content-Type: application/json
+      search:
+        rest_total_hits_as_int: true
+        body:
+          query:
+            script_score:
+              query: {match_all: {} }
+              script:
+                source: "doc['vector'].magnitudes[0]"
+
+  - match: {hits.total: 2}
+
+  - match: {hits.hits.0._id: "1"}
+  - close_to: {hits.hits.0._score: {value: 429.6021, error: 0.01}}
+
+  - match: {hits.hits.1._id: "3"}
+  - close_to: {hits.hits.1._score: {value: 192.6447, error: 0.01}}
+
+  - do:
+      headers:
+        Content-Type: application/json
+      search:
+        rest_total_hits_as_int: true
+        body:
+          query:
+            script_score:
+              query: {match_all: {} }
+              script:
+                source: "doc['byte_vector'].magnitudes[0]"
+
+  - match: {hits.total: 2}
+
+  - match: {hits.hits.0._id: "3"}
+  - close_to: {hits.hits.0._score: {value: 125.41531, error: 0.01}}
+
+  - match: {hits.hits.1._id: "1"}
+  - close_to: {hits.hits.1._score: {value: 19.07878, error: 0.01}}
+
+  - do:
+      headers:
+        Content-Type: application/json
+      search:
+        rest_total_hits_as_int: true
+        body:
+          query:
+            script_score:
+              query: {match_all: {} }
+              script:
+                source: "doc['bit_vector'].magnitudes[0]"
+
+  - match: {hits.total: 2}
+
+  - match: {hits.hits.0._id: "1"}
+  - close_to: {hits.hits.0._score: {value: 3.872983, error: 0.01}}
+
+  - match: {hits.hits.1._id: "3"}
+  - close_to: {hits.hits.1._score: {value: 3.464101, error: 0.01}}
+---
+"Test vector value scoring":
+  - skip:
+      features: close_to
+
+  - do:
+      headers:
+        Content-Type: application/json
+      search:
+        rest_total_hits_as_int: true
+        body:
+          query:
+            script_score:
+              query: {match_all: {} }
+              script:
+                source: "doc['vector'].vectorValues.next()[0];"
+
+  - match: {hits.total: 2}
+
+  - match: {hits.hits.0._id: "1"}
+  - close_to: {hits.hits.0._score: {value: 230, error: 0.01}}
+
+  - match: {hits.hits.1._id: "3"}
+  - close_to: {hits.hits.1._score: {value: 0.5, error: 0.01}}
+
+  - do:
+      headers:
+        Content-Type: application/json
+      search:
+        rest_total_hits_as_int: true
+        body:
+          query:
+            script_score:
+              query: {match_all: {} }
+              script:
+                source: "doc['byte_vector'].vectorValues.next()[0];"
+
+  - match: {hits.total: 2}
+
+  - match: {hits.hits.0._id: "1"}
+  - close_to: {hits.hits.0._score: {value: 8, error: 0.01}}
+
+  - match: {hits.hits.1._id: "3"}
+  - close_to: {hits.hits.1._score: {value: 2, error: 0.01}}
+
+  - do:
+      headers:
+        Content-Type: application/json
+      search:
+        rest_total_hits_as_int: true
+        body:
+          query:
+            script_score:
+              query: {match_all: {} }
+              script:
+                source: "doc['bit_vector'].vectorValues.next()[0];"
+
+  - match: {hits.total: 2}
+
+  - match: {hits.hits.0._id: "1"}
+  - close_to: {hits.hits.0._score: {value: 8, error: 0.01}}
+
+  - match: {hits.hits.1._id: "3"}
+  - close_to: {hits.hits.1._score: {value: 2, error: 0.01}}
diff --git a/modules/legacy-geo/build.gradle b/modules/legacy-geo/build.gradle
index d936276362340..55171221396a3 100644
--- a/modules/legacy-geo/build.gradle
+++ b/modules/legacy-geo/build.gradle
@@ -7,8 +7,6 @@
  * License v3.0 only", or the "Server Side Public License, v 1".
  */
 
-import org.elasticsearch.gradle.internal.info.BuildParams
-
 apply plugin: 'elasticsearch.internal-cluster-test'
 
 esplugin {
@@ -26,7 +24,7 @@ dependencies {
   testImplementation project(":test:framework")
 }
 
-if (BuildParams.isSnapshotBuild() == false) {
+if (buildParams.isSnapshotBuild() == false) {
   tasks.named("test").configure {
     systemProperty 'es.index_mode_feature_flag_registered', 'true'
   }
diff --git a/modules/mapper-extras/build.gradle b/modules/mapper-extras/build.gradle
index 35842ad27643f..992f39a22b28c 100644
--- a/modules/mapper-extras/build.gradle
+++ b/modules/mapper-extras/build.gradle
@@ -7,10 +7,8 @@
  * License v3.0 only", or the "Server Side Public License, v 1".
  */
 
-import org.elasticsearch.gradle.internal.info.BuildParams
-
-apply plugin: 'elasticsearch.legacy-yaml-rest-test'
-apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test'
+apply plugin: 'elasticsearch.internal-yaml-rest-test'
+apply plugin: 'elasticsearch.yaml-rest-compat-test'
 apply plugin: 'elasticsearch.internal-cluster-test'
 
 esplugin {
@@ -23,9 +21,3 @@ restResources {
     include '_common', 'cluster', 'field_caps', 'nodes', 'indices', 'index', 'search', 'get'
   }
 }
-
-if (BuildParams.isSnapshotBuild() == false) {
-  tasks.named("test").configure {
-    systemProperty 'es.index_mode_feature_flag_registered', 'true'
-  }
-}
diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java
index dc9bc96f107a0..83fe07170d6e7 100644
--- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java
+++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java
@@ -527,7 +527,13 @@ protected Number randomNumber() {
 
     public void testEncodeDecodeExactScalingFactor() {
         double v = randomValue();
-        assertThat(encodeDecode(1 / v, v), equalTo(1 / v));
+        double expected = 1 / v;
+        // We don't produce infinities while decoding. See #testDecodeHandlingInfinity().
+        if (Double.isInfinite(expected)) {
+            var sign = expected == Double.POSITIVE_INFINITY ? 1 : -1;
+            expected = sign * Double.MAX_VALUE;
+        }
+        assertThat(encodeDecode(1 / v, v), equalTo(expected));
     }
 
     /**
diff --git a/modules/mapper-extras/src/yamlRestTest/java/org/elasticsearch/index/mapper/MapperExtrasClientYamlTestSuiteIT.java b/modules/mapper-extras/src/yamlRestTest/java/org/elasticsearch/index/mapper/MapperExtrasClientYamlTestSuiteIT.java
index b325c81616257..80953af5a4cbb 100644
--- a/modules/mapper-extras/src/yamlRestTest/java/org/elasticsearch/index/mapper/MapperExtrasClientYamlTestSuiteIT.java
+++ b/modules/mapper-extras/src/yamlRestTest/java/org/elasticsearch/index/mapper/MapperExtrasClientYamlTestSuiteIT.java
@@ -12,8 +12,10 @@
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
 import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
+import org.junit.ClassRule;
 
 /** Runs yaml rest tests */
 public class MapperExtrasClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
@@ -26,4 +28,12 @@ public MapperExtrasClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate t
     public static Iterable<Object[]> parameters() throws Exception {
         return ESClientYamlSuiteTestCase.createParameters();
     }
+
+    @ClassRule
+    public static ElasticsearchCluster cluster = ElasticsearchCluster.local().module("mapper-extras").build();
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
 }
diff --git a/modules/parent-join/build.gradle b/modules/parent-join/build.gradle
index c714e52512b2e..0d34b5f6e3b40 100644
--- a/modules/parent-join/build.gradle
+++ b/modules/parent-join/build.gradle
@@ -6,8 +6,8 @@
  * your election, the "Elastic License 2.0", the "GNU Affero General Public
  * License v3.0 only", or the "Server Side Public License, v 1".
  */
-apply plugin: 'elasticsearch.legacy-yaml-rest-test'
-apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test'
+apply plugin: 'elasticsearch.internal-yaml-rest-test'
+apply plugin: 'elasticsearch.yaml-rest-compat-test'
 apply plugin: 'elasticsearch.internal-cluster-test'
 
 esplugin {
diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregator.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregator.java
index 6985f6da98cf1..12489ad37aabd 100644
--- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregator.java
+++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregator.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.join.aggregations;
 
 import org.apache.lucene.search.Query;
+import org.elasticsearch.common.util.LongArray;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.CardinalityUpperBound;
@@ -44,7 +45,7 @@ public ChildrenToParentAggregator(
     }
 
     @Override
-    public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+    public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException {
         return buildAggregationsForSingleBucket(
             owningBucketOrds,
             (owningBucketOrd, subAggregationResults) -> new InternalParent(
diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentJoinAggregator.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentJoinAggregator.java
index 60412179807a5..1b99d2b34046c 100644
--- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentJoinAggregator.java
+++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentJoinAggregator.java
@@ -21,6 +21,7 @@
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.BitArray;
+import org.elasticsearch.common.util.LongArray;
 import org.elasticsearch.core.Releasable;
 import org.elasticsearch.core.Releasables;
 import org.elasticsearch.search.aggregations.AggregationExecutionContext;
@@ -115,7 +116,7 @@ public void postCollection() throws IOException {
     }
 
     @Override
-    protected void prepareSubAggs(long[] ordsToCollect) throws IOException {
+    protected void prepareSubAggs(LongArray ordsToCollect) throws IOException {
         IndexReader indexReader = searcher().getIndexReader();
         for (LeafReaderContext ctx : indexReader.leaves()) {
             Scorer childDocsScorer = outFilter.scorer(ctx);
@@ -153,9 +154,10 @@ public float score() {
                      * structure that maps a primitive long to a list of primitive
                      * longs.
                      */
-                    for (long owningBucketOrd : ordsToCollect) {
-                        if (collectionStrategy.exists(owningBucketOrd, globalOrdinal)) {
-                            collectBucket(sub, docId, owningBucketOrd);
+                    for (long ord = 0; ord < ordsToCollect.size(); ord++) {
+                        long ordToCollect = ordsToCollect.get(ord);
+                        if (collectionStrategy.exists(ordToCollect, globalOrdinal)) {
+                            collectBucket(sub, docId, ordToCollect);
+                        }
                     }
                 }
diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java
index d8a061a2de6d9..939107f87715d 100644
--- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java
+++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.join.aggregations;
 
 import org.apache.lucene.search.Query;
+import org.elasticsearch.common.util.LongArray;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.CardinalityUpperBound;
@@ -40,7 +41,7 @@ public ParentToChildrenAggregator(
     }
 
     @Override
-    public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+    public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException {
         return buildAggregationsForSingleBucket(
             owningBucketOrds,
             (owningBucketOrd, subAggregationResults) -> new InternalChildren(
diff --git a/modules/parent-join/src/yamlRestTest/java/org/elasticsearch/join/ParentChildClientYamlTestSuiteIT.java b/modules/parent-join/src/yamlRestTest/java/org/elasticsearch/join/ParentChildClientYamlTestSuiteIT.java
index 7b02f87691841..ecfc464f0739c 100644
--- a/modules/parent-join/src/yamlRestTest/java/org/elasticsearch/join/ParentChildClientYamlTestSuiteIT.java
+++ b/modules/parent-join/src/yamlRestTest/java/org/elasticsearch/join/ParentChildClientYamlTestSuiteIT.java
@@ -12,8 +12,10 @@
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
 import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
+import org.junit.ClassRule;
 
 public class ParentChildClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
     public ParentChildClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
@@ -24,4 +26,12 @@ public ParentChildClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate te
     public static Iterable<Object[]> parameters() throws Exception {
         return createParameters();
     }
+
+    @ClassRule
+    public static ElasticsearchCluster cluster = ElasticsearchCluster.local().module("parent-join").build();
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
 }
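The aggregator changes above swap long[] for org.elasticsearch.common.util.LongArray, which is indexed by a long rather than an int, so the enhanced-for loops become explicit index loops as in the hunk above. Presumably the motivation is the BigArrays infrastructure (paged, circuit-breaker-accounted arrays) rather than plain heap arrays, though this diff does not say so. A minimal sketch of the changed loop shape, using a stand-in interface rather than the real class:

    // Stand-in for org.elasticsearch.common.util.LongArray, just to show the iteration shape.
    interface LongArrayLike {
        long size();
        long get(long index);
    }

    class LoopShape {
        static void forEachOrd(LongArrayLike ordsToCollect) {
            // No enhanced-for over LongArray: index with a long and call get(), as in ParentJoinAggregator above.
            for (long ord = 0; ord < ordsToCollect.size(); ord++) {
                long ordToCollect = ordsToCollect.get(ord);
                System.out.println(ordToCollect);
            }
        }
    }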
*/ -apply plugin: 'elasticsearch.legacy-yaml-rest-test' -apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { diff --git a/modules/percolator/src/yamlRestTest/java/org/elasticsearch/percolator/PercolatorClientYamlTestSuiteIT.java b/modules/percolator/src/yamlRestTest/java/org/elasticsearch/percolator/PercolatorClientYamlTestSuiteIT.java index 21bc2d8a4ae10..d71e758525085 100644 --- a/modules/percolator/src/yamlRestTest/java/org/elasticsearch/percolator/PercolatorClientYamlTestSuiteIT.java +++ b/modules/percolator/src/yamlRestTest/java/org/elasticsearch/percolator/PercolatorClientYamlTestSuiteIT.java @@ -12,8 +12,10 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; public class PercolatorClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { public PercolatorClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { @@ -24,4 +26,12 @@ public PercolatorClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate tes public static Iterable<Object[]> parameters() throws Exception { return ESClientYamlSuiteTestCase.createParameters(); } + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local().module("percolator").build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } } diff --git a/modules/rank-eval/build.gradle b/modules/rank-eval/build.gradle index 511dd7be9ae68..c9016798c18b9 100644 --- a/modules/rank-eval/build.gradle +++ b/modules/rank-eval/build.gradle @@ -25,7 +25,3 @@ testClusters.configureEach { // Modules whose integration is explicitly tested in integration tests module ':modules:lang-mustache' } - -tasks.named("yamlRestCompatTestTransform").configure({ task -> - task.skipTest("rank_eval/30_failures/Response format", "warning does not exist for compatibility") -}) diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 14a6b1e3f5b82..7281c161e2c4a 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -10,15 +10,14 @@ import org.elasticsearch.gradle.Architecture import org.elasticsearch.gradle.OS import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.AntFixture import org.elasticsearch.gradle.transform.UnzipTransform apply plugin: 'elasticsearch.test-with-dependencies' apply plugin: 'elasticsearch.jdk-download' -apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.legacy-java-rest-test' -apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' +apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { @@ -33,7 +32,6 @@ testClusters.configureEach { module ':modules:rest-root' // Whitelist reindexing from the local node so we can test reindex-from-remote.
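Editor's aside: the plugin swaps above (`legacy-yaml-rest-test` to `internal-yaml-rest-test`, `legacy-yaml-rest-compat-test` to `yaml-rest-compat-test`) recur throughout this PR. With the legacy plugins, Gradle's `testClusters` block provisioned the test cluster; with the internal plugins, each suite declares its own cluster through a JUnit `@ClassRule`, as the `ParentChildClientYamlTestSuiteIT` and `PercolatorClientYamlTestSuiteIT` hunks show. A condensed sketch of the pattern, with a placeholder module name (the reindex `testClusters` hunk continues right after this aside):

    import com.carrotsearch.randomizedtesting.annotations.Name;
    import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

    import org.elasticsearch.test.cluster.ElasticsearchCluster;
    import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
    import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
    import org.junit.ClassRule;

    public class ExampleClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

        // The suite now owns its cluster: modules and settings that previously lived
        // in build.gradle's testClusters block are declared here instead.
        @ClassRule
        public static ElasticsearchCluster cluster = ElasticsearchCluster.local().module("example-module").build();

        public ExampleClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
            super(testCandidate);
        }

        @ParametersFactory
        public static Iterable<Object[]> parameters() throws Exception {
            return createParameters();
        }

        @Override
        protected String getTestRestCluster() {
            return cluster.getHttpAddresses(); // point the REST client at the rule-managed cluster
        }
    }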
setting 'reindex.remote.whitelist', '127.0.0.1:*' - requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") } dependencies { @@ -42,6 +40,10 @@ dependencies { // for parent/child testing testImplementation project(':modules:parent-join') testImplementation project(':modules:rest-root') + + clusterModules project(':modules:lang-painless') + clusterModules project(':modules:parent-join') + clusterModules project(":modules:rest-root") } restResources { @@ -132,7 +134,7 @@ if (OS.current() == OS.WINDOWS) { oldEsDependency.getAttributes().attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE); TaskProvider fixture = tasks.register("oldEs${version}Fixture", AntFixture) { dependsOn project.configurations.oldesFixture, jdks.legacy, oldEsDependency - executable = "${BuildParams.runtimeJavaHome}/bin/java" + executable = "${buildParams.runtimeJavaHome.get()}/bin/java" env 'CLASSPATH', "${-> project.configurations.oldesFixture.asPath}" // old versions of Elasticsearch need JAVA_HOME env 'JAVA_HOME', jdks.legacy.javaHomePath diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java index 4a001bb2d0969..a4b030e3c793f 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java @@ -35,7 +35,7 @@ * The test works as follows: * 1. Start a large (reasonably long running) reindexing request on the coordinator-only node. * 2. Check that the reindexing task appears on the coordinating node - * 3. With a 10s timeout value for MAXIMUM_REINDEXING_TIMEOUT_SETTING, + * 3. With a 60s timeout value for MAXIMUM_REINDEXING_TIMEOUT_SETTING, * wait for the reindexing task to complete before closing the node * 4. 
Confirm that the reindexing task succeeds with the wait (it will fail without it) */ @@ -58,8 +58,9 @@ public void testReindexWithShutdown() throws Exception { final String masterNodeName = internalCluster().startMasterOnlyNode(); final String dataNodeName = internalCluster().startDataOnlyNode(); + /* Maximum time to wait for reindexing tasks to complete before shutdown */ final Settings COORD_SETTINGS = Settings.builder() - .put(MAXIMUM_REINDEXING_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(10)) + .put(MAXIMUM_REINDEXING_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(60)) .build(); final String coordNodeName = internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); @@ -118,7 +119,7 @@ public void onFailure(Exception e) { internalCluster().stopNode(coordNodeName); } - // Make sure all documents from the source index have been reindexed into the destination index + // Make sure all documents from the source index have been re-indexed into the destination index private void checkDestinationIndex(String dataNodeName, int numDocs) throws Exception { assertTrue(indexExists(DEST_INDEX)); flushAndRefresh(DEST_INDEX); diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java index 3905edae46c2f..a4aa0514bb47a 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java @@ -208,7 +208,7 @@ public void testMigrateInternalManagedSystemIndex() throws Exception { assertIndexHasCorrectProperties( finalMetadata, - ".int-man-old-reindexed-for-8", + ".int-man-old-reindexed-for-9", INTERNAL_MANAGED_FLAG_VALUE, true, true, @@ -216,7 +216,7 @@ public void testMigrateInternalManagedSystemIndex() throws Exception { ); assertIndexHasCorrectProperties( finalMetadata, - ".int-unman-old-reindexed-for-8", + ".int-unman-old-reindexed-for-9", INTERNAL_UNMANAGED_FLAG_VALUE, false, true, @@ -224,7 +224,7 @@ public void testMigrateInternalManagedSystemIndex() throws Exception { ); assertIndexHasCorrectProperties( finalMetadata, - ".ext-man-old-reindexed-for-8", + ".ext-man-old-reindexed-for-9", EXTERNAL_MANAGED_FLAG_VALUE, true, false, @@ -232,7 +232,7 @@ public void testMigrateInternalManagedSystemIndex() throws Exception { ); assertIndexHasCorrectProperties( finalMetadata, - ".ext-unman-old-reindexed-for-8", + ".ext-unman-old-reindexed-for-9", EXTERNAL_UNMANAGED_FLAG_VALUE, false, false, diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java index 1ee5519593569..3442e9dc43925 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java @@ -218,7 +218,7 @@ public void testMultipleFeatureMigration() throws Exception { // Finally, verify that all the indices exist and have the properties we expect. 
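Editor's aside on the timeout bump above: `MAXIMUM_REINDEXING_TIMEOUT_SETTING` caps how long a shutting-down node waits for in-flight reindexing tasks, and raising it from 10s to 60s gives slow CI workers room to finish the large request before the node closes. A sketch of one plausible way to apply such settings when starting a node (the test above builds `COORD_SETTINGS` and starts the coordinating node as separate steps; this condenses them and is not a verbatim quote of the test). The property assertions of the migration test continue below:

    // Hypothetical condensed wiring: pass the raised 60-second drain timeout directly
    // to the coordinating-only node that will run the reindex task.
    final Settings coordSettings = Settings.builder()
        .put(MAXIMUM_REINDEXING_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(60))
        .build();
    final String coordNodeName = internalCluster().startCoordinatingOnlyNode(coordSettings);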
assertIndexHasCorrectProperties( finalMetadata, - ".int-man-old-reindexed-for-8", + ".int-man-old-reindexed-for-9", INTERNAL_MANAGED_FLAG_VALUE, true, true, @@ -226,7 +226,7 @@ public void testMultipleFeatureMigration() throws Exception { ); assertIndexHasCorrectProperties( finalMetadata, - ".int-unman-old-reindexed-for-8", + ".int-unman-old-reindexed-for-9", INTERNAL_UNMANAGED_FLAG_VALUE, false, true, @@ -234,7 +234,7 @@ public void testMultipleFeatureMigration() throws Exception { ); assertIndexHasCorrectProperties( finalMetadata, - ".ext-man-old-reindexed-for-8", + ".ext-man-old-reindexed-for-9", EXTERNAL_MANAGED_FLAG_VALUE, true, false, @@ -242,7 +242,7 @@ public void testMultipleFeatureMigration() throws Exception { ); assertIndexHasCorrectProperties( finalMetadata, - ".ext-unman-old-reindexed-for-8", + ".ext-unman-old-reindexed-for-9", EXTERNAL_UNMANAGED_FLAG_VALUE, false, false, @@ -251,7 +251,7 @@ public void testMultipleFeatureMigration() throws Exception { assertIndexHasCorrectProperties( finalMetadata, - ".second-int-man-old-reindexed-for-8", + ".second-int-man-old-reindexed-for-9", SECOND_FEATURE_IDX_FLAG_VALUE, true, true, diff --git a/modules/reindex/src/javaRestTest/java/org/elasticsearch/index/reindex/ReindexWithoutContentIT.java b/modules/reindex/src/javaRestTest/java/org/elasticsearch/index/reindex/ReindexWithoutContentIT.java index 99be7123040cc..a0212a937f27b 100644 --- a/modules/reindex/src/javaRestTest/java/org/elasticsearch/index/reindex/ReindexWithoutContentIT.java +++ b/modules/reindex/src/javaRestTest/java/org/elasticsearch/index/reindex/ReindexWithoutContentIT.java @@ -18,7 +18,6 @@ import static org.hamcrest.CoreMatchers.containsString; public class ReindexWithoutContentIT extends ESRestTestCase { - public void testReindexMissingBody() throws IOException { ResponseException responseException = expectThrows( ResponseException.class, diff --git a/modules/reindex/src/yamlRestTest/java/org/elasticsearch/index/reindex/ReindexClientYamlTestSuiteIT.java b/modules/reindex/src/yamlRestTest/java/org/elasticsearch/index/reindex/ReindexClientYamlTestSuiteIT.java index a44a2150bf07c..0378a63ed5481 100644 --- a/modules/reindex/src/yamlRestTest/java/org/elasticsearch/index/reindex/ReindexClientYamlTestSuiteIT.java +++ b/modules/reindex/src/yamlRestTest/java/org/elasticsearch/index/reindex/ReindexClientYamlTestSuiteIT.java @@ -12,8 +12,10 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; public class ReindexClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { public ReindexClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { @@ -24,4 +26,18 @@ public ReindexClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCa public static Iterable<Object[]> parameters() throws Exception { return ESClientYamlSuiteTestCase.createParameters(); } + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("reindex") + .module("lang-painless") + .module("parent-join") + .module("rest-root") + .setting("reindex.remote.whitelist", "127.0.0.1:*") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } } diff --git a/modules/repository-azure/build.gradle
b/modules/repository-azure/build.gradle index 86776e743685e..8c1ca3891bc1e 100644 --- a/modules/repository-azure/build.gradle +++ b/modules/repository-azure/build.gradle @@ -1,8 +1,3 @@ -import org.apache.tools.ant.filters.ReplaceTokens -import org.elasticsearch.gradle.internal.info.BuildParams -import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin -import org.elasticsearch.gradle.internal.test.RestIntegTestTask - /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the "Elastic License @@ -11,6 +6,11 @@ import org.elasticsearch.gradle.internal.test.RestIntegTestTask * your election, the "Elastic License 2.0", the "GNU Affero General Public * License v3.0 only", or the "Server Side Public License, v 1". */ + +import org.apache.tools.ant.filters.ReplaceTokens +import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin +import org.elasticsearch.gradle.internal.test.RestIntegTestTask + apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' @@ -321,7 +321,7 @@ tasks.register("workloadIdentityYamlRestTest", RestIntegTestTask) { // omitting key and sas_token so that we use a bearer token from workload identity } -if (BuildParams.inFipsJvm) { +if (buildParams.inFipsJvm) { // Cannot override the trust store in FIPS mode, and these tasks require a HTTPS fixture tasks.named("managedIdentityYamlRestTest").configure { enabled = false } tasks.named("workloadIdentityYamlRestTest").configure { enabled = false } diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java index bd21f208faac4..3fa4f7de7e717 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.repositories.azure; import fixture.azure.AzureHttpHandler; +import fixture.azure.MockAzureBlobStore; import com.azure.storage.common.policy.RequestRetryOptions; import com.azure.storage.common.policy.RetryPolicyType; @@ -184,7 +185,12 @@ long getUploadBlockSize() { @SuppressForbidden(reason = "this test uses a HttpHandler to emulate an Azure endpoint") private static class AzureBlobStoreHttpHandler extends AzureHttpHandler implements BlobStoreHttpHandler { AzureBlobStoreHttpHandler(final String account, final String container) { - super(account, container, null /* no auth header validation - sometimes it's omitted in these tests (TODO why?) */); + super( + account, + container, + null /* no auth header validation - sometimes it's omitted in these tests (TODO why?) 
*/, + MockAzureBlobStore.LeaseExpiryPredicate.NEVER_EXPIRE + ); } } diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java index 6d5c17c392141..40be0f8ca78c4 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -10,6 +10,7 @@ package org.elasticsearch.repositories.azure; import fixture.azure.AzureHttpFixture; +import fixture.azure.MockAzureBlobStore; import com.azure.core.exception.HttpResponseException; import com.azure.storage.blob.BlobContainerClient; @@ -60,7 +61,8 @@ public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyReposi System.getProperty("test.azure.container"), System.getProperty("test.azure.tenant_id"), System.getProperty("test.azure.client_id"), - AzureHttpFixture.sharedKeyForAccountPredicate(AZURE_ACCOUNT) + AzureHttpFixture.sharedKeyForAccountPredicate(AZURE_ACCOUNT), + MockAzureBlobStore.LeaseExpiryPredicate.NEVER_EXPIRE ); @Override diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java index 52bc1ee1399d4..73936d82fc204 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java @@ -180,7 +180,7 @@ protected String buildKey(String blobName) { } private boolean skipRegisterOperation(ActionListener<?> listener) { - return skipCas(listener) || skipIfNotPrimaryOnlyLocationMode(listener); + return skipIfNotPrimaryOnlyLocationMode(listener); } private boolean skipIfNotPrimaryOnlyLocationMode(ActionListener<?> listener) { diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 3c64bb9f3b830..b4567a92184fc 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -40,6 +40,7 @@ import com.azure.storage.blob.models.ListBlobsOptions; import com.azure.storage.blob.options.BlobParallelUploadOptions; import com.azure.storage.blob.options.BlockBlobSimpleUploadOptions; +import com.azure.storage.blob.specialized.BlobLeaseClient; import com.azure.storage.blob.specialized.BlobLeaseClientBuilder; import com.azure.storage.blob.specialized.BlockBlobAsyncClient; @@ -1010,7 +1011,7 @@ private static BytesReference innerCompareAndExchangeRegister( } return currentValue; } finally { - leaseClient.releaseLease(); + bestEffortRelease(leaseClient); } } else { if (expected.length() == 0) { @@ -1020,6 +1021,29 @@ } } + /** + * Release the lease, ignoring conflicts due to expiry + * + * @see Outcomes of lease operations by lease state + * @param leaseClient The client for the lease + */ + private static void bestEffortRelease(BlobLeaseClient leaseClient) { +
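// (Editor's note, not part of the patch:) Azure answers the release of a lease that
// has already expired or been re-acquired with HTTP 409 Conflict. For a best-effort
// release that outcome is benign, so the catch below swallows only the CONFLICT
// status and rethrows every other BlobStorageException.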
try { + leaseClient.releaseLease(); + } catch (BlobStorageException blobStorageException) { + if (blobStorageException.getStatusCode() == RestStatus.CONFLICT.getStatus()) { + // This is OK, we tried to release a lease that was expired/re-acquired + logger.debug( + "Ignored conflict on release: errorCode={}, message={}", + blobStorageException.getErrorCode(), + blobStorageException.getMessage() + ); + } else { + throw blobStorageException; + } + } + } + private static BytesReference downloadRegisterBlob( String containerPath, String blobKey, diff --git a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerStatsTests.java b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerStatsTests.java index 6730e5c3c81bd..812d519e60260 100644 --- a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerStatsTests.java +++ b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerStatsTests.java @@ -10,6 +10,7 @@ package org.elasticsearch.repositories.azure; import fixture.azure.AzureHttpHandler; +import fixture.azure.MockAzureBlobStore; import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.common.bytes.BytesReference; @@ -26,7 +27,7 @@ public class AzureBlobContainerStatsTests extends AbstractAzureServerTestCase { @SuppressForbidden(reason = "use a http server") @Before public void configureAzureHandler() { - httpServer.createContext("/", new AzureHttpHandler(ACCOUNT, CONTAINER, null)); + httpServer.createContext("/", new AzureHttpHandler(ACCOUNT, CONTAINER, null, MockAzureBlobStore.LeaseExpiryPredicate.NEVER_EXPIRE)); } public void testOperationPurposeIsReflectedInBlobStoreStats() throws IOException { diff --git a/modules/repository-azure/src/yamlRestTest/java/org/elasticsearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java b/modules/repository-azure/src/yamlRestTest/java/org/elasticsearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java index 64dde0248ad2c..b24574da36825 100644 --- a/modules/repository-azure/src/yamlRestTest/java/org/elasticsearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java +++ b/modules/repository-azure/src/yamlRestTest/java/org/elasticsearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java @@ -10,6 +10,7 @@ package org.elasticsearch.repositories.azure; import fixture.azure.AzureHttpFixture; +import fixture.azure.MockAzureBlobStore; import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; @@ -47,7 +48,8 @@ public class RepositoryAzureClientYamlTestSuiteIT extends ESClientYamlSuiteTestC AZURE_TEST_CONTAINER, AZURE_TEST_TENANT_ID, AZURE_TEST_CLIENT_ID, - decideAuthHeaderPredicate() + decideAuthHeaderPredicate(), + MockAzureBlobStore.LeaseExpiryPredicate.NEVER_EXPIRE ); private static Predicate<String> decideAuthHeaderPredicate() { diff --git a/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml b/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml index a4a7d0b22a0ed..968e93cf9fc55 100644 --- a/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml +++ b/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml @@ -193,6 +193,20 @@ setup: container:
zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE client: integration_test +--- +"Register a read-only repository with a non existing container": + + - do: + catch: /repository_verification_exception/ + snapshot.create_repository: + repository: repository + body: + type: azure + settings: + container: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE + client: integration_test + readonly: true + --- "Register a repository with a non existing client": diff --git a/modules/repository-gcs/build.gradle b/modules/repository-gcs/build.gradle index 246611e4803a2..811645d154c7a 100644 --- a/modules/repository-gcs/build.gradle +++ b/modules/repository-gcs/build.gradle @@ -9,7 +9,6 @@ import org.apache.tools.ant.filters.ReplaceTokens -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin import java.nio.file.Files @@ -178,7 +177,7 @@ tasks.named("thirdPartyAudit").configure { ) - if(BuildParams.graalVmRuntime == false) { + if(buildParams.graalVmRuntime == false) { ignoreMissingClasses( 'org.graalvm.nativeimage.hosted.Feature', 'org.graalvm.nativeimage.hosted.Feature$BeforeAnalysisAccess', @@ -240,7 +239,7 @@ def gcsThirdPartyTest = tasks.register("gcsThirdPartyUnitTest", Test) { systemProperty 'tests.security.manager', false systemProperty 'test.google.bucket', gcsBucket systemProperty 'test.google.fixture', Boolean.toString(useFixture) - nonInputProperties.systemProperty 'test.google.base', gcsBasePath + "_third_party_tests_" + BuildParams.testSeed + nonInputProperties.systemProperty 'test.google.base', gcsBasePath + "_third_party_tests_" + buildParams.testSeed if (useFixture == false) { nonInputProperties.systemProperty 'test.google.account', "${-> encodedCredentials.call()}" } diff --git a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java index 110c31b212ea1..a53ec71f66376 100644 --- a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java +++ b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.repositories.blobstore.ESMockAPIBasedRepositoryIntegTestCase; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.RestUtils; +import org.elasticsearch.test.fixture.HttpHeaderParser; import org.threeten.bp.Duration; import java.io.IOException; @@ -177,9 +178,9 @@ public void testReadLargeBlobWithRetries() throws Exception { httpServer.createContext(downloadStorageEndpoint(blobContainer, "large_blob_retries"), exchange -> { Streams.readFully(exchange.getRequestBody()); exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); - final Tuple<Long, Long> range = getRange(exchange); - final int offset = Math.toIntExact(range.v1()); - final byte[] chunk = Arrays.copyOfRange(bytes, offset, Math.toIntExact(Math.min(range.v2() + 1, bytes.length))); + final HttpHeaderParser.Range range = getRange(exchange); + final int offset = Math.toIntExact(range.start()); + final byte[] chunk = Arrays.copyOfRange(bytes, offset, Math.toIntExact(Math.min(range.end() + 1, bytes.length))); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), chunk.length); if (randomBoolean() && countDown.decrementAndGet() >= 0) { exchange.getResponseBody().write(chunk, 0,
chunk.length - 1); diff --git a/modules/repository-s3/build.gradle b/modules/repository-s3/build.gradle index 346a458a65f85..2cfb5d23db4ff 100644 --- a/modules/repository-s3/build.gradle +++ b/modules/repository-s3/build.gradle @@ -1,7 +1,3 @@ -import org.apache.tools.ant.filters.ReplaceTokens -import org.elasticsearch.gradle.internal.info.BuildParams -import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin - /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the "Elastic License @@ -10,8 +6,12 @@ import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin * your election, the "Elastic License 2.0", the "GNU Affero General Public * License v3.0 only", or the "Server Side Public License, v 1". */ +import org.apache.tools.ant.filters.ReplaceTokens +import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin + apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' +apply plugin: 'elasticsearch.internal-java-rest-test' esplugin { description 'The S3 repository plugin adds S3 repositories' @@ -43,13 +43,24 @@ dependencies { api 'javax.xml.bind:jaxb-api:2.2.2' testImplementation project(':test:fixtures:s3-fixture') - yamlRestTestImplementation project(":test:framework") - yamlRestTestImplementation project(':test:fixtures:s3-fixture') - yamlRestTestImplementation project(':test:fixtures:minio-fixture') + internalClusterTestImplementation project(':test:fixtures:minio-fixture') + internalClusterTestRuntimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" + yamlRestTestImplementation project(':modules:repository-s3') + yamlRestTestImplementation project(':test:fixtures:s3-fixture') + yamlRestTestImplementation project(':test:fixtures:testcontainer-utils') + yamlRestTestImplementation project(':test:framework') yamlRestTestRuntimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" - internalClusterTestRuntimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" + + javaRestTestImplementation project(':modules:repository-s3') + javaRestTestImplementation project(':test:fixtures:aws-sts-fixture') + javaRestTestImplementation project(':test:fixtures:ec2-imds-fixture') + javaRestTestImplementation project(':test:fixtures:minio-fixture') + javaRestTestImplementation project(':test:fixtures:s3-fixture') + javaRestTestImplementation project(':test:fixtures:testcontainer-utils') + javaRestTestImplementation project(':test:framework') + javaRestTestRuntimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" } restResources { @@ -76,90 +87,25 @@ def testRepositoryCreds = tasks.register("testRepositoryCreds", Test) { testClassesDirs = sourceSets.test.output.classesDirs } -tasks.named('check').configure { - dependsOn(testRepositoryCreds) -} - tasks.named('test').configure { // this is tested explicitly in separate test tasks exclude '**/RepositoryCredentialsTests.class' } boolean useFixture = false - -// We test against two repositories, one which uses the usual two-part "permanent" credentials and -// the other which uses three-part "temporary" or "session" credentials. 
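Editor's aside on the GCS retry test above: the untyped `Tuple` gives way to `HttpHeaderParser.Range`, a record with named accessors, so the chunk arithmetic reads off `start()` and `end()` instead of `v1()`/`v2()`. A small sketch of the consuming side (assuming, as the diff's `end() + 1` implies, that both offsets are inclusive; `exchange`, `getRange` and `bytes` are the test's own names):

    // Serve the requested byte range of `bytes` back to the client.
    final HttpHeaderParser.Range range = getRange(exchange);  // parsed from the Range header
    final int offset = Math.toIntExact(range.start());        // first byte to send, inclusive
    final int endExclusive = Math.toIntExact(Math.min(range.end() + 1, bytes.length));
    final byte[] chunk = Arrays.copyOfRange(bytes, offset, endExclusive);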
- String s3PermanentAccessKey = System.getenv("amazon_s3_access_key") String s3PermanentSecretKey = System.getenv("amazon_s3_secret_key") String s3PermanentBucket = System.getenv("amazon_s3_bucket") String s3PermanentBasePath = System.getenv("amazon_s3_base_path") -String s3TemporaryAccessKey = System.getenv("amazon_s3_access_key_temporary") -String s3TemporarySecretKey = System.getenv("amazon_s3_secret_key_temporary") -String s3TemporarySessionToken = System.getenv("amazon_s3_session_token_temporary") -String s3TemporaryBucket = System.getenv("amazon_s3_bucket_temporary") -String s3TemporaryBasePath = System.getenv("amazon_s3_base_path_temporary") - -String s3EC2Bucket = System.getenv("amazon_s3_bucket_ec2") -String s3EC2BasePath = System.getenv("amazon_s3_base_path_ec2") - -String s3ECSBucket = System.getenv("amazon_s3_bucket_ecs") -String s3ECSBasePath = System.getenv("amazon_s3_base_path_ecs") - -String s3STSBucket = System.getenv("amazon_s3_bucket_sts") -String s3STSBasePath = System.getenv("amazon_s3_base_path_sts") - -boolean s3DisableChunkedEncoding = BuildParams.random.nextBoolean() - -// If all these variables are missing then we are testing against the internal fixture instead, which has the following -// credentials hard-coded in. +// If all these variables are missing then we are testing against the internal fixture instead, which has the following credentials hard-coded in. if (!s3PermanentAccessKey && !s3PermanentSecretKey && !s3PermanentBucket && !s3PermanentBasePath) { + useFixture = true s3PermanentAccessKey = 's3_test_access_key' s3PermanentSecretKey = 's3_test_secret_key' s3PermanentBucket = 'bucket' s3PermanentBasePath = 'base_path' - useFixture = true -} -if (!s3TemporaryAccessKey && !s3TemporarySecretKey && !s3TemporaryBucket && !s3TemporaryBasePath && !s3TemporarySessionToken) { - s3TemporaryAccessKey = 'session_token_access_key' - s3TemporarySecretKey = 'session_token_secret_key' - s3TemporaryBucket = 'session_token_bucket' - s3TemporaryBasePath = 'session_token_base_path' -} - -if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath) { - s3EC2Bucket = 'ec2_bucket' - s3EC2BasePath = 'ec2_base_path' - s3ECSBucket = 'ecs_bucket' - s3ECSBasePath = 'ecs_base_path' -} - -if (!s3STSBucket && !s3STSBasePath) { - s3STSBucket = 'sts_bucket' - s3STSBasePath = 'sts_base_path' -} - -tasks.named("processYamlRestTestResources").configure { - from("src/test/resources") { - include "aws-web-identity-token-file" - } - Map expansions = [ - 'permanent_bucket' : s3PermanentBucket, - 'permanent_base_path' : s3PermanentBasePath + "_integration_tests", - 'temporary_bucket' : s3TemporaryBucket, - 'temporary_base_path' : s3TemporaryBasePath + "_integration_tests", - 'ec2_bucket' : s3EC2Bucket, - 'ec2_base_path' : s3EC2BasePath, - 'ecs_bucket' : s3ECSBucket, - 'ecs_base_path' : s3ECSBasePath, - 'sts_bucket' : s3STSBucket, - 'sts_base_path' : s3STSBasePath, - 'disable_chunked_encoding': s3DisableChunkedEncoding - ] - inputs.properties(expansions) - filter("tokens" : expansions.collectEntries {k, v -> [k, v.toString()]} /* must be a map of strings */, ReplaceTokens.class) } tasks.named("internalClusterTest").configure { @@ -169,22 +115,7 @@ tasks.named("internalClusterTest").configure { systemProperty 'es.insecure_network_trace_enabled', 'true' } -tasks.named("yamlRestTest").configure { - systemProperty("s3PermanentAccessKey", s3PermanentAccessKey) - systemProperty("s3PermanentSecretKey", s3PermanentSecretKey) - systemProperty("s3TemporaryAccessKey", s3TemporaryAccessKey) - 
systemProperty("s3TemporarySecretKey", s3TemporarySecretKey) - systemProperty("s3EC2AccessKey", s3PermanentAccessKey) - - // ideally we could resolve an env path in cluster config as resource similar to configuring a config file - // not sure how common this is, but it would be nice to support - File awsWebIdentityTokenExternalLocation = file('src/test/resources/aws-web-identity-token-file') - // The web identity token can be read only from the plugin config directory because of security restrictions - // Ideally we would create a symlink, but extraConfigFile doesn't support it - nonInputProperties.systemProperty("awsWebIdentityTokenExternalLocation", awsWebIdentityTokenExternalLocation.getAbsolutePath()) -} - -// 3rd Party Tests +// 3rd Party Tests, i.e. testing against a real S3 repository tasks.register("s3ThirdPartyTest", Test) { SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); SourceSet internalTestSourceSet = sourceSets.getByName(InternalClusterTestPlugin.SOURCE_SET_NAME) @@ -192,13 +123,13 @@ tasks.register("s3ThirdPartyTest", Test) { setClasspath(internalTestSourceSet.getRuntimeClasspath()) include '**/S3RepositoryThirdPartyTests.class' systemProperty("tests.use.fixture", Boolean.toString(useFixture)) - - // test container accesses ~/.testcontainers.properties read - systemProperty "tests.security.manager", "false" systemProperty 'test.s3.account', s3PermanentAccessKey systemProperty 'test.s3.key', s3PermanentSecretKey systemProperty 'test.s3.bucket', s3PermanentBucket - nonInputProperties.systemProperty 'test.s3.base', s3PermanentBasePath + "_third_party_tests_" + BuildParams.testSeed + nonInputProperties.systemProperty 'test.s3.base', s3PermanentBasePath + "_third_party_tests_" + buildParams.testSeed + + // test container accesses ~/.testcontainers.properties read + systemProperty "tests.security.manager", "false" } tasks.named("thirdPartyAudit").configure { @@ -235,5 +166,6 @@ tasks.named("thirdPartyAudit").configure { tasks.named("check").configure { dependsOn(tasks.withType(Test)) + dependsOn(testRepositoryCreds) } diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java index 3552cb8d9389a..4cebedebfba07 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -61,7 +61,12 @@ public class S3RepositoryThirdPartyTests extends AbstractThirdPartyRepositoryTes static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("tests.use.fixture", "true")); @ClassRule - public static MinioTestContainer minio = new MinioTestContainer(USE_FIXTURE); + public static MinioTestContainer minio = new MinioTestContainer( + USE_FIXTURE, + System.getProperty("test.s3.account"), + System.getProperty("test.s3.key"), + System.getProperty("test.s3.bucket") + ); @Override protected Collection> getPlugins() { diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/AbstractRepositoryS3RestTestCase.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/AbstractRepositoryS3RestTestCase.java new file mode 100644 index 0000000000000..2199a64521759 --- /dev/null +++ 
b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/AbstractRepositoryS3RestTestCase.java @@ -0,0 +1,383 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.repositories.s3; + +import io.netty.handler.codec.http.HttpMethod; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.cluster.routing.Murmur3HashFunction; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Set; +import java.util.function.UnaryOperator; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public abstract class AbstractRepositoryS3RestTestCase extends ESRestTestCase { + + public record TestRepository(String repositoryName, String clientName, String bucketName, String basePath) { + + public Closeable register() throws IOException { + return register(UnaryOperator.identity()); + } + + public Closeable register(UnaryOperator<Settings> settingsUnaryOperator) throws IOException { + assertOK(client().performRequest(getRegisterRequest(settingsUnaryOperator))); + return () -> assertOK(client().performRequest(new Request("DELETE", "/_snapshot/" + repositoryName()))); + } + + private Request getRegisterRequest(UnaryOperator<Settings> settingsUnaryOperator) throws IOException { + return newXContentRequest( + HttpMethod.PUT, + "/_snapshot/" + repositoryName(), + (b, p) -> b.field("type", S3Repository.TYPE) + .startObject("settings") + .value( + settingsUnaryOperator.apply( + Settings.builder() + .put("bucket", bucketName()) + .put("base_path", basePath()) + .put("client", clientName()) + .put("canned_acl", "private") + .put("storage_class", "standard") + .put("disable_chunked_encoding", randomBoolean()) + .build() + ) + ) + .endObject() + ); + } + } + + protected abstract String getBucketName(); + + protected abstract String getBasePath(); + + protected abstract String getClientName(); + + protected static String getIdentifierPrefix(String testSuiteName) { + return testSuiteName + "-" + Integer.toString(Murmur3HashFunction.hash(testSuiteName + System.getProperty("tests.seed")), 16) + "-"; + } + + private TestRepository newTestRepository() { + return new TestRepository(randomIdentifier(), getClientName(), getBucketName(), getBasePath()); + } + + private static UnaryOperator<Settings> readonlyOperator(Boolean readonly) { + return readonly == null + ?
UnaryOperator.identity() + : s -> Settings.builder().put(s).put(BlobStoreRepository.READONLY_SETTING_KEY, readonly).build(); + } + + public void testGetRepository() throws IOException { + testGetRepository(null); + } + + public void testGetRepositoryReadonlyTrue() throws IOException { + testGetRepository(Boolean.TRUE); + } + + public void testGetRepositoryReadonlyFalse() throws IOException { + testGetRepository(Boolean.FALSE); + } + + private void testGetRepository(Boolean readonly) throws IOException { + final var repository = newTestRepository(); + try (var ignored = repository.register(readonlyOperator(readonly))) { + final var repositoryName = repository.repositoryName(); + final var responseObjectPath = assertOKAndCreateObjectPath( + client().performRequest(new Request("GET", "/_snapshot/" + repositoryName)) + ); + + assertEquals("s3", responseObjectPath.evaluate(repositoryName + ".type")); + assertNotNull(responseObjectPath.evaluate(repositoryName + ".settings")); + assertEquals(repository.bucketName(), responseObjectPath.evaluate(repositoryName + ".settings.bucket")); + assertEquals(repository.clientName(), responseObjectPath.evaluate(repositoryName + ".settings.client")); + assertEquals(repository.basePath(), responseObjectPath.evaluate(repositoryName + ".settings.base_path")); + assertEquals("private", responseObjectPath.evaluate(repositoryName + ".settings.canned_acl")); + assertEquals("standard", responseObjectPath.evaluate(repositoryName + ".settings.storage_class")); + assertNull(responseObjectPath.evaluate(repositoryName + ".settings.access_key")); + assertNull(responseObjectPath.evaluate(repositoryName + ".settings.secret_key")); + assertNull(responseObjectPath.evaluate(repositoryName + ".settings.session_token")); + + if (readonly == null) { + assertNull(responseObjectPath.evaluate(repositoryName + ".settings." + BlobStoreRepository.READONLY_SETTING_KEY)); + } else { + assertEquals( + Boolean.toString(readonly), + responseObjectPath.evaluate(repositoryName + ".settings." 
+ BlobStoreRepository.READONLY_SETTING_KEY) + ); + } + } + } + + public void testNonexistentBucket() throws Exception { + testNonexistentBucket(null); + } + + public void testNonexistentBucketReadonlyTrue() throws Exception { + testNonexistentBucket(Boolean.TRUE); + } + + public void testNonexistentBucketReadonlyFalse() throws Exception { + testNonexistentBucket(Boolean.FALSE); + } + + private void testNonexistentBucket(Boolean readonly) throws Exception { + final var repository = new TestRepository( + randomIdentifier(), + getClientName(), + randomValueOtherThan(getBucketName(), ESTestCase::randomIdentifier), + getBasePath() + ); + final var registerRequest = repository.getRegisterRequest(readonlyOperator(readonly)); + + final var responseException = expectThrows(ResponseException.class, () -> client().performRequest(registerRequest)); + assertEquals(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), responseException.getResponse().getStatusLine().getStatusCode()); + assertThat( + responseException.getMessage(), + allOf(containsString("repository_verification_exception"), containsString("is not accessible on master node")) + ); + } + + public void testNonexistentClient() throws Exception { + testNonexistentClient(null); + } + + public void testNonexistentClientReadonlyTrue() throws Exception { + testNonexistentClient(Boolean.TRUE); + } + + public void testNonexistentClientReadonlyFalse() throws Exception { + testNonexistentClient(Boolean.FALSE); + } + + private void testNonexistentClient(Boolean readonly) throws Exception { + final var repository = new TestRepository( + randomIdentifier(), + randomValueOtherThanMany(c -> c.equals(getClientName()) || c.equals("default"), ESTestCase::randomIdentifier), + getBucketName(), + getBasePath() + ); + final var registerRequest = repository.getRegisterRequest(readonlyOperator(readonly)); + + final var responseException = expectThrows(ResponseException.class, () -> client().performRequest(registerRequest)); + assertEquals(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), responseException.getResponse().getStatusLine().getStatusCode()); + assertThat( + responseException.getMessage(), + allOf( + containsString("repository_verification_exception"), + containsString("is not accessible on master node"), + containsString("illegal_argument_exception"), + containsString("Unknown s3 client name") + ) + ); + } + + public void testNonexistentSnapshot() throws Exception { + testNonexistentSnapshot(null); + } + + public void testNonexistentSnapshotReadonlyTrue() throws Exception { + testNonexistentSnapshot(Boolean.TRUE); + } + + public void testNonexistentSnapshotReadonlyFalse() throws Exception { + testNonexistentSnapshot(Boolean.FALSE); + } + + private void testNonexistentSnapshot(Boolean readonly) throws Exception { + final var repository = newTestRepository(); + try (var ignored = repository.register(readonlyOperator(readonly))) { + final var repositoryName = repository.repositoryName(); + + final var getSnapshotRequest = new Request("GET", "/_snapshot/" + repositoryName + "/" + randomIdentifier()); + final var getSnapshotException = expectThrows(ResponseException.class, () -> client().performRequest(getSnapshotRequest)); + assertEquals(RestStatus.NOT_FOUND.getStatus(), getSnapshotException.getResponse().getStatusLine().getStatusCode()); + assertThat(getSnapshotException.getMessage(), containsString("snapshot_missing_exception")); + + final var restoreRequest = new Request("POST", "/_snapshot/" + repositoryName + "/" + randomIdentifier() + "/_restore"); + if 
(randomBoolean()) { + restoreRequest.addParameter("wait_for_completion", Boolean.toString(randomBoolean())); + } + final var restoreException = expectThrows(ResponseException.class, () -> client().performRequest(restoreRequest)); + assertEquals(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), restoreException.getResponse().getStatusLine().getStatusCode()); + assertThat(restoreException.getMessage(), containsString("snapshot_restore_exception")); + + if (readonly != Boolean.TRUE) { + final var deleteRequest = new Request("DELETE", "/_snapshot/" + repositoryName + "/" + randomIdentifier()); + final var deleteException = expectThrows(ResponseException.class, () -> client().performRequest(deleteRequest)); + assertEquals(RestStatus.NOT_FOUND.getStatus(), deleteException.getResponse().getStatusLine().getStatusCode()); + assertThat(deleteException.getMessage(), containsString("snapshot_missing_exception")); + } + } + } + + public void testUsageStats() throws Exception { + testUsageStats(null); + } + + public void testUsageStatsReadonlyTrue() throws Exception { + testUsageStats(Boolean.TRUE); + } + + public void testUsageStatsReadonlyFalse() throws Exception { + testUsageStats(Boolean.FALSE); + } + + private void testUsageStats(Boolean readonly) throws Exception { + final var repository = newTestRepository(); + try (var ignored = repository.register(readonlyOperator(readonly))) { + final var responseObjectPath = assertOKAndCreateObjectPath(client().performRequest(new Request("GET", "/_cluster/stats"))); + assertThat(responseObjectPath.evaluate("repositories.s3.count"), equalTo(1)); + + if (readonly == Boolean.TRUE) { + assertThat(responseObjectPath.evaluate("repositories.s3.read_only"), equalTo(1)); + assertNull(responseObjectPath.evaluate("repositories.s3.read_write")); + } else { + assertNull(responseObjectPath.evaluate("repositories.s3.read_only")); + assertThat(responseObjectPath.evaluate("repositories.s3.read_write"), equalTo(1)); + } + } + } + + public void testSnapshotAndRestore() throws Exception { + final var repository = newTestRepository(); + try (var ignored = repository.register()) { + final var repositoryName = repository.repositoryName(); + final var indexName = randomIdentifier(); + final var snapshotsToDelete = new ArrayList<String>(2); + + try { + indexDocuments(indexName, """ + {"index":{"_id":"1"}} + {"snapshot":"one"} + {"index":{"_id":"2"}} + {"snapshot":"one"} + {"index":{"_id":"3"}} + {"snapshot":"one"} + """, 3); + + // create the first snapshot + final var snapshot1Name = randomIdentifier(); + createSnapshot(repositoryName, snapshotsToDelete, snapshot1Name); + + // check the first snapshot's status + { + final var snapshotStatusResponse = assertOKAndCreateObjectPath( + client().performRequest(new Request("GET", "/_snapshot/" + repositoryName + "/" + snapshot1Name + "/_status")) + ); + assertEquals(snapshot1Name, snapshotStatusResponse.evaluate("snapshots.0.snapshot")); + assertEquals("SUCCESS", snapshotStatusResponse.evaluate("snapshots.0.state")); + } + + // add more documents to the index + indexDocuments(indexName, """ + {"index":{"_id":"4"}} + {"snapshot":"one"} + {"index":{"_id":"5"}} + {"snapshot":"one"} + {"index":{"_id":"6"}} + {"snapshot":"one"} + {"index":{"_id":"7"}} + {"snapshot":"one"} + """, 7); + + // create the second snapshot + final var snapshot2Name = randomValueOtherThan(snapshot1Name, ESTestCase::randomIdentifier); + createSnapshot(repositoryName, snapshotsToDelete, snapshot2Name); + + // list the snapshots + { + final var listSnapshotsResponse =
assertOKAndCreateObjectPath( + client().performRequest( + new Request("GET", "/_snapshot/" + repositoryName + "/" + snapshot1Name + "," + snapshot2Name) + ) + ); + assertEquals(2, listSnapshotsResponse.evaluateArraySize("snapshots")); + assertEquals( + Set.of(snapshot1Name, snapshot2Name), + Set.of( + listSnapshotsResponse.evaluate("snapshots.0.snapshot"), + listSnapshotsResponse.evaluate("snapshots.1.snapshot") + ) + ); + assertEquals("SUCCESS", listSnapshotsResponse.evaluate("snapshots.0.state")); + assertEquals("SUCCESS", listSnapshotsResponse.evaluate("snapshots.1.state")); + } + + // delete and restore the index from snapshot 2 + deleteAndRestoreIndex(indexName, repositoryName, snapshot2Name, 7); + + // delete and restore the index from snapshot 1 + deleteAndRestoreIndex(indexName, repositoryName, snapshot1Name, 3); + } finally { + if (snapshotsToDelete.isEmpty() == false) { + assertOK( + client().performRequest( + new Request( + "DELETE", + "/_snapshot/" + repositoryName + "/" + snapshotsToDelete.stream().collect(Collectors.joining(",")) + ) + ) + ); + } + } + } + } + + private static void deleteAndRestoreIndex(String indexName, String repositoryName, String snapshot2Name, int expectedDocCount) + throws IOException { + assertOK(client().performRequest(new Request("DELETE", "/" + indexName))); + final var restoreRequest = new Request("POST", "/_snapshot/" + repositoryName + "/" + snapshot2Name + "/_restore"); + restoreRequest.addParameter("wait_for_completion", "true"); + assertOK(client().performRequest(restoreRequest)); + assertIndexDocCount(indexName, expectedDocCount); + } + + private static void indexDocuments(String indexName, String body, int expectedDocCount) throws IOException { + // create and populate an index + final var indexDocsRequest = new Request("POST", "/" + indexName + "/_bulk"); + indexDocsRequest.addParameter("refresh", "true"); + indexDocsRequest.setJsonEntity(body); + assertFalse(assertOKAndCreateObjectPath(client().performRequest(indexDocsRequest)).evaluate("errors")); + + // check the index contents + assertIndexDocCount(indexName, expectedDocCount); + } + + private static void createSnapshot(String repositoryName, ArrayList<String> snapshotsToDelete, String snapshotName) throws IOException { + final var createSnapshotRequest = new Request("POST", "/_snapshot/" + repositoryName + "/" + snapshotName); + createSnapshotRequest.addParameter("wait_for_completion", "true"); + final var createSnapshotResponse = assertOKAndCreateObjectPath(client().performRequest(createSnapshotRequest)); + snapshotsToDelete.add(snapshotName); + assertEquals(snapshotName, createSnapshotResponse.evaluate("snapshot.snapshot")); + assertEquals("SUCCESS", createSnapshotResponse.evaluate("snapshot.state")); + assertThat(createSnapshotResponse.evaluate("snapshot.shards.failed"), equalTo(0)); + } + + private static void assertIndexDocCount(String indexName, int expectedCount) throws IOException { + assertThat( + assertOKAndCreateObjectPath(client().performRequest(new Request("GET", "/" + indexName + "/_count"))).evaluate("count"), + equalTo(expectedCount) + ); + } +} diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3BasicCredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3BasicCredentialsRestIT.java new file mode 100644 index 0000000000000..45844703683bb --- /dev/null +++
b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3BasicCredentialsRestIT.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.repositories.s3; + +import fixture.s3.S3HttpFixture; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) +@ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 +public class RepositoryS3BasicCredentialsRestIT extends AbstractRepositoryS3RestTestCase { + + private static final String PREFIX = getIdentifierPrefix("RepositoryS3BasicCredentialsRestIT"); + private static final String BUCKET = PREFIX + "bucket"; + private static final String BASE_PATH = PREFIX + "base_path"; + private static final String ACCESS_KEY = PREFIX + "access-key"; + private static final String SECRET_KEY = PREFIX + "secret-key"; + private static final String CLIENT = "basic_credentials_client"; + + private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, S3HttpFixture.fixedAccessKey(ACCESS_KEY)); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .keystore("s3.client." + CLIENT + ".access_key", ACCESS_KEY) + .keystore("s3.client." + CLIENT + ".secret_key", SECRET_KEY) + .setting("s3.client." + CLIENT + ".endpoint", s3Fixture::getAddress) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(cluster); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Override + protected String getBucketName() { + return BUCKET; + } + + @Override + protected String getBasePath() { + return BASE_PATH; + } + + @Override + protected String getClientName() { + return CLIENT; + } +} diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java new file mode 100644 index 0000000000000..a79ae4de7cc66 --- /dev/null +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
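Editor's aside on the fixture wiring in `RepositoryS3BasicCredentialsRestIT` above: the cluster builder captures `s3Fixture::getAddress` as a supplier, so the fixture's address is only resolved when the nodes start. The `RuleChain` makes that safe by starting the outer rule first and stopping it last:

    // Fixture first, then cluster: nodes resolve the fixture's address on startup.
    @ClassRule
    public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(cluster);

The same outer-to-inner ordering appears in each of the credential-variant suites that follow.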
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.repositories.s3; + +import fixture.aws.imds.Ec2ImdsHttpFixture; +import fixture.aws.imds.Ec2ImdsVersion; +import fixture.s3.DynamicS3Credentials; +import fixture.s3.S3HttpFixture; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.util.Set; + +@ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) +@ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 +public class RepositoryS3EcsCredentialsRestIT extends AbstractRepositoryS3RestTestCase { + + private static final String PREFIX = getIdentifierPrefix("RepositoryS3EcsCredentialsRestIT"); + private static final String BUCKET = PREFIX + "bucket"; + private static final String BASE_PATH = PREFIX + "base_path"; + private static final String CLIENT = "ecs_credentials_client"; + + private static final DynamicS3Credentials dynamicS3Credentials = new DynamicS3Credentials(); + + private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture( + Ec2ImdsVersion.V1, + dynamicS3Credentials::addValidCredentials, + Set.of("/ecs_credentials_endpoint") + ); + + private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicS3Credentials::isAuthorized); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .setting("s3.client." + CLIENT + ".endpoint", s3Fixture::getAddress) + .environment("AWS_CONTAINER_CREDENTIALS_FULL_URI", () -> ec2ImdsHttpFixture.getAddress() + "/ecs_credentials_endpoint") + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(ec2ImdsHttpFixture).around(cluster); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Override + protected String getBucketName() { + return BUCKET; + } + + @Override + protected String getBasePath() { + return BASE_PATH; + } + + @Override + protected String getClientName() { + return CLIENT; + } +} diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV1CredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV1CredentialsRestIT.java new file mode 100644 index 0000000000000..ead91981b3fa8 --- /dev/null +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV1CredentialsRestIT.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.repositories.s3; + +import fixture.aws.imds.Ec2ImdsHttpFixture; +import fixture.aws.imds.Ec2ImdsVersion; +import fixture.s3.DynamicS3Credentials; +import fixture.s3.S3HttpFixture; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.util.Set; + +@ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) +@ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 +public class RepositoryS3ImdsV1CredentialsRestIT extends AbstractRepositoryS3RestTestCase { + + private static final String PREFIX = getIdentifierPrefix("RepositoryS3ImdsV1CredentialsRestIT"); + private static final String BUCKET = PREFIX + "bucket"; + private static final String BASE_PATH = PREFIX + "base_path"; + private static final String CLIENT = "imdsv1_credentials_client"; + + private static final DynamicS3Credentials dynamicS3Credentials = new DynamicS3Credentials(); + + private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture( + Ec2ImdsVersion.V1, + dynamicS3Credentials::addValidCredentials, + Set.of() + ); + + private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicS3Credentials::isAuthorized); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .setting("s3.client." + CLIENT + ".endpoint", s3Fixture::getAddress) + .systemProperty("com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", ec2ImdsHttpFixture::getAddress) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(ec2ImdsHttpFixture).around(s3Fixture).around(cluster); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Override + protected String getBucketName() { + return BUCKET; + } + + @Override + protected String getBasePath() { + return BASE_PATH; + } + + @Override + protected String getClientName() { + return CLIENT; + } +} diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java new file mode 100644 index 0000000000000..67adb096bd1ba --- /dev/null +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.repositories.s3; + +import fixture.aws.imds.Ec2ImdsHttpFixture; +import fixture.aws.imds.Ec2ImdsVersion; +import fixture.s3.DynamicS3Credentials; +import fixture.s3.S3HttpFixture; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.util.Set; + +@ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) +@ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 +public class RepositoryS3ImdsV2CredentialsRestIT extends AbstractRepositoryS3RestTestCase { + + private static final String PREFIX = getIdentifierPrefix("RepositoryS3ImdsV2CredentialsRestIT"); + private static final String BUCKET = PREFIX + "bucket"; + private static final String BASE_PATH = PREFIX + "base_path"; + private static final String CLIENT = "imdsv2_credentials_client"; + + private static final DynamicS3Credentials dynamicS3Credentials = new DynamicS3Credentials(); + + private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture( + Ec2ImdsVersion.V2, + dynamicS3Credentials::addValidCredentials, + Set.of() + ); + + private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicS3Credentials::isAuthorized); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .setting("s3.client." 
+ CLIENT + ".endpoint", s3Fixture::getAddress) + .systemProperty("com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", ec2ImdsHttpFixture::getAddress) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(ec2ImdsHttpFixture).around(s3Fixture).around(cluster); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Override + protected String getBucketName() { + return BUCKET; + } + + @Override + protected String getBasePath() { + return BASE_PATH; + } + + @Override + protected String getClientName() { + return CLIENT; + } +} diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioClientYamlTestSuiteIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioBasicCredentialsRestIT.java similarity index 50% rename from modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioClientYamlTestSuiteIT.java rename to modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioBasicCredentialsRestIT.java index d2b1413295ceb..93915e8491d5b 100644 --- a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioClientYamlTestSuiteIT.java +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioBasicCredentialsRestIT.java @@ -9,44 +9,56 @@ package org.elasticsearch.repositories.s3; -import com.carrotsearch.randomizedtesting.annotations.Name; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.fixtures.minio.MinioTestContainer; import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; -import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.junit.ClassRule; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; @ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) -public class RepositoryS3MinioClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT { +@ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 +public class RepositoryS3MinioBasicCredentialsRestIT extends AbstractRepositoryS3RestTestCase { - public static MinioTestContainer minio = new MinioTestContainer(); + private static final String PREFIX = getIdentifierPrefix("RepositoryS3MinioBasicCredentialsRestIT"); + private static final String BUCKET = PREFIX + "bucket"; + private static final String BASE_PATH = PREFIX + "base_path"; + private static final String ACCESS_KEY = PREFIX + "access-key"; + private static final String SECRET_KEY = PREFIX + "secret-key"; + private static final String CLIENT = "minio_client"; + + private static final MinioTestContainer minioFixture = new MinioTestContainer(true, ACCESS_KEY, SECRET_KEY, BUCKET); public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .module("repository-s3") - .keystore("s3.client.integration_test_permanent.access_key", System.getProperty("s3PermanentAccessKey")) - .keystore("s3.client.integration_test_permanent.secret_key", System.getProperty("s3PermanentSecretKey")) - .setting("s3.client.integration_test_permanent.endpoint", () -> minio.getAddress()) + 
.keystore("s3.client." + CLIENT + ".access_key", ACCESS_KEY) + .keystore("s3.client." + CLIENT + ".secret_key", SECRET_KEY) + .setting("s3.client." + CLIENT + ".endpoint", minioFixture::getAddress) .build(); @ClassRule - public static TestRule ruleChain = RuleChain.outerRule(minio).around(cluster); + public static TestRule ruleChain = RuleChain.outerRule(minioFixture).around(cluster); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } - @ParametersFactory - public static Iterable parameters() throws Exception { - return createParameters(new String[] { "repository_s3/10_basic", "repository_s3/20_repository_permanent_credentials" }); + @Override + protected String getBucketName() { + return BUCKET; } - public RepositoryS3MinioClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { - super(testCandidate); + @Override + protected String getBasePath() { + return BASE_PATH; } @Override - protected String getTestRestCluster() { - return cluster.getHttpAddresses(); + protected String getClientName() { + return CLIENT; } } diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestReloadCredentialsIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestReloadCredentialsIT.java new file mode 100644 index 0000000000000..1f09fa6b081b9 --- /dev/null +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestReloadCredentialsIT.java @@ -0,0 +1,115 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + + package org.elasticsearch.repositories.s3; + + import fixture.s3.S3HttpFixture; + import io.netty.handler.codec.http.HttpMethod; + + import org.elasticsearch.client.Request; + import org.elasticsearch.client.ResponseException; + import org.elasticsearch.cluster.routing.Murmur3HashFunction; + import org.elasticsearch.common.settings.Settings; + import org.elasticsearch.test.ESTestCase; + import org.elasticsearch.test.cluster.ElasticsearchCluster; + import org.elasticsearch.test.cluster.MutableSettingsProvider; + import org.elasticsearch.test.rest.ESRestTestCase; + import org.junit.ClassRule; + import org.junit.rules.RuleChain; + import org.junit.rules.TestRule; + + import java.io.IOException; + + import static org.hamcrest.CoreMatchers.containsString; + import static org.hamcrest.Matchers.allOf; + import static org.hamcrest.Matchers.equalTo; + + public class RepositoryS3RestReloadCredentialsIT extends ESRestTestCase { + + private static final String HASHED_SEED = Integer.toString(Murmur3HashFunction.hash(System.getProperty("tests.seed"))); + private static final String BUCKET = "RepositoryS3RestReloadCredentialsIT-bucket-" + HASHED_SEED; + private static final String BASE_PATH = "RepositoryS3RestReloadCredentialsIT-base-path-" + HASHED_SEED; + + private static volatile String repositoryAccessKey; + + public static final S3HttpFixture s3Fixture = new S3HttpFixture( + true, + BUCKET, + BASE_PATH, + S3HttpFixture.mutableAccessKey(() -> repositoryAccessKey) + ); + + private static final MutableSettingsProvider keystoreSettings = new MutableSettingsProvider(); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .keystore(keystoreSettings) + .setting("s3.client.default.endpoint", s3Fixture::getAddress) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(cluster); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public void testReloadCredentialsFromKeystore() throws IOException { + // Register repository (?verify=false because we don't have access to the blob store yet) + final var repositoryName = randomIdentifier(); + registerRepository( + repositoryName, + S3Repository.TYPE, + false, + Settings.builder().put("bucket", BUCKET).put("base_path", BASE_PATH).build() + ); + final var verifyRequest = new Request("POST", "/_snapshot/" + repositoryName + "/_verify"); + + // Set up initial credentials + final var accessKey1 = randomIdentifier(); + repositoryAccessKey = accessKey1; + keystoreSettings.put("s3.client.default.access_key", accessKey1); + keystoreSettings.put("s3.client.default.secret_key", randomSecretKey()); + cluster.updateStoredSecureSettings(); + + assertOK(client().performRequest(createReloadSecureSettingsRequest())); + + // Check access using initial credentials + assertOK(client().performRequest(verifyRequest)); + + // Rotate credentials in blob store + final var accessKey2 = randomValueOtherThan(accessKey1, ESTestCase::randomSecretKey); + repositoryAccessKey = accessKey2; + + // Ensure that the initial credentials are now invalid + final var accessDeniedException2 = expectThrows(ResponseException.class, () -> client().performRequest(verifyRequest)); + assertThat(accessDeniedException2.getResponse().getStatusLine().getStatusCode(), equalTo(500)); + assertThat( + accessDeniedException2.getMessage(), + allOf(containsString("Access denied"), containsString("Status Code: 403"), containsString("Error Code: AccessDenied")) + ); + + // Set up refreshed
credentials + keystoreSettings.put("s3.client.default.access_key", accessKey2); + cluster.updateStoredSecureSettings(); + assertOK(client().performRequest(createReloadSecureSettingsRequest())); + + // Check access using refreshed credentials + assertOK(client().performRequest(verifyRequest)); + } + + private Request createReloadSecureSettingsRequest() throws IOException { + return newXContentRequest( + HttpMethod.POST, + "/_nodes/reload_secure_settings", + (b, p) -> inFipsJvm() ? b.field("secure_settings_password", "keystore-password") : b + ); + } +} diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3SessionCredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3SessionCredentialsRestIT.java new file mode 100644 index 0000000000000..84a327ee131ae --- /dev/null +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3SessionCredentialsRestIT.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.repositories.s3; + +import fixture.s3.S3HttpFixture; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) +@ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 +public class RepositoryS3SessionCredentialsRestIT extends AbstractRepositoryS3RestTestCase { + + private static final String PREFIX = getIdentifierPrefix("RepositoryS3SessionCredentialsRestIT"); + private static final String BUCKET = PREFIX + "bucket"; + private static final String BASE_PATH = PREFIX + "base_path"; + private static final String ACCESS_KEY = PREFIX + "access-key"; + private static final String SECRET_KEY = PREFIX + "secret-key"; + private static final String SESSION_TOKEN = PREFIX + "session-token"; + private static final String CLIENT = "session_credentials_client"; + + private static final S3HttpFixture s3Fixture = new S3HttpFixture( + true, + BUCKET, + BASE_PATH, + S3HttpFixture.fixedAccessKeyAndToken(ACCESS_KEY, SESSION_TOKEN) + ); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .keystore("s3.client." + CLIENT + ".access_key", ACCESS_KEY) + .keystore("s3.client." + CLIENT + ".secret_key", SECRET_KEY) + .keystore("s3.client." + CLIENT + ".session_token", SESSION_TOKEN) + .setting("s3.client." 
+ CLIENT + ".endpoint", s3Fixture::getAddress) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(cluster); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Override + protected String getBucketName() { + return BUCKET; + } + + @Override + protected String getBasePath() { + return BASE_PATH; + } + + @Override + protected String getClientName() { + return CLIENT; + } +} diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsCredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsCredentialsRestIT.java new file mode 100644 index 0000000000000..de80e4179ef5e --- /dev/null +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsCredentialsRestIT.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.repositories.s3; + +import fixture.aws.sts.AwsStsHttpFixture; +import fixture.s3.DynamicS3Credentials; +import fixture.s3.S3HttpFixture; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +@ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) +@ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 +public class RepositoryS3StsCredentialsRestIT extends AbstractRepositoryS3RestTestCase { + + private static final String PREFIX = getIdentifierPrefix("RepositoryS3StsCredentialsRestIT"); + private static final String BUCKET = PREFIX + "bucket"; + private static final String BASE_PATH = PREFIX + "base_path"; + private static final String CLIENT = "sts_credentials_client"; + + private static final DynamicS3Credentials dynamicS3Credentials = new DynamicS3Credentials(); + + private static final S3HttpFixture s3HttpFixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicS3Credentials::isAuthorized); + + private static final String WEB_IDENTITY_TOKEN_FILE_CONTENTS = """ + Atza|IQEBLjAsAhRFiXuWpUXuRvQ9PZL3GMFcYevydwIUFAHZwXZXXXXXXXXJnrulxKDHwy87oGKPznh0D6bEQZTSCzyoCtL_8S07pLpr0zMbn6w1lfVZKNTBdDans\ + FBmtGnIsIapjI6xKR02Yc_2bQ8LZbUXSGm6Ry6_BG7PrtLZtj_dfCTj92xNGed-CrKqjG7nPBjNIL016GGvuS5gSvPRUxWES3VYfm1wl7WTI7jn-Pcb6M-buCgHhFO\ + zTQxod27L9CqnOLio7N3gZAGpsp6n1-AJBOCJckcyXe2c6uD0srOJeZlKUm2eTDVMf8IehDVI0r1QOnTV6KzzAI3OY87Vd_cVMQ"""; + + private static final AwsStsHttpFixture stsHttpFixture = new AwsStsHttpFixture( + dynamicS3Credentials::addValidCredentials, + WEB_IDENTITY_TOKEN_FILE_CONTENTS + ); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .setting("s3.client." 
+ CLIENT + ".endpoint", s3HttpFixture::getAddress) + .systemProperty( + "com.amazonaws.sdk.stsMetadataServiceEndpointOverride", + () -> stsHttpFixture.getAddress() + "/assume-role-with-web-identity" + ) + .configFile( + S3Service.CustomWebIdentityTokenCredentialsProvider.WEB_IDENTITY_TOKEN_FILE_LOCATION, + Resource.fromString(WEB_IDENTITY_TOKEN_FILE_CONTENTS) + ) + .environment("AWS_WEB_IDENTITY_TOKEN_FILE", S3Service.CustomWebIdentityTokenCredentialsProvider.WEB_IDENTITY_TOKEN_FILE_LOCATION) + // The AWS STS SDK requires the role and session names to be set. We can verify that they are sent to S3S in the + // S3HttpFixtureWithSTS fixture + .environment("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/FederatedWebIdentityRole") + .environment("AWS_ROLE_SESSION_NAME", "sts-fixture-test") + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(s3HttpFixture).around(stsHttpFixture).around(cluster); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Override + protected String getBucketName() { + return BUCKET; + } + + @Override + protected String getBasePath() { + return BASE_PATH; + } + + @Override + protected String getClientName() { + return CLIENT; + } +} diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index 5fb3254df819b..d08bd40275fec 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -450,7 +450,7 @@ private static DeleteObjectsRequest bulkDelete(OperationPurpose purpose, S3BlobS @Override public void close() throws IOException { - this.service.close(); + service.onBlobStoreClose(); } @Override diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index fde15d5d6e6bc..591350c34ab85 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -318,8 +318,7 @@ class S3Repository extends MeteredBlobStoreRepository { deprecationLogger.critical( DeprecationCategory.SECURITY, "s3_repository_secret_settings", - "Using s3 access/secret key from repository settings. Instead " - + "store these in named clients and the elasticsearch keystore for secure settings." + INSECURE_CREDENTIALS_DEPRECATION_WARNING ); } @@ -336,6 +335,11 @@ class S3Repository extends MeteredBlobStoreRepository { ); } + static final String INSECURE_CREDENTIALS_DEPRECATION_WARNING = Strings.format(""" + This repository's settings include a S3 access key and secret key, but repository settings are stored in plaintext and must not be \ + used for security-sensitive information. Instead, store all secure settings in the keystore. 
See [%s] for more information.\ + """, ReferenceDocs.SECURE_SETTINGS); + private static Map<String, String> buildLocation(RepositoryMetadata metadata) { return Map.of("base_path", BASE_PATH_SETTING.get(metadata.settings()), "bucket", BUCKET_SETTING.get(metadata.settings())); } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 1ebd6f920d518..1a66f5782fc03 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -303,6 +303,10 @@ private synchronized void releaseCachedClients() { IdleConnectionReaper.shutdown(); } + public void onBlobStoreClose() { + releaseCachedClients(); + } + @Override public void close() throws IOException { releaseCachedClients(); @@ -345,6 +349,8 @@ static class CustomWebIdentityTokenCredentialsProvider implements AWSCredentials private static final String STS_HOSTNAME = "https://sts.amazonaws.com"; + static final String WEB_IDENTITY_TOKEN_FILE_LOCATION = "repository-s3/aws-web-identity-token-file"; + private STSAssumeRoleWithWebIdentitySessionCredentialsProvider credentialsProvider; private AWSSecurityTokenService stsClient; private String stsRegion; @@ -363,7 +369,7 @@ static class CustomWebIdentityTokenCredentialsProvider implements AWSCredentials } // Make sure that a readable symlink to the token file exists in the plugin config directory // AWS_WEB_IDENTITY_TOKEN_FILE exists but we only use Web Identity Tokens if a corresponding symlink exists and is readable - Path webIdentityTokenFileSymlink = environment.configFile().resolve("repository-s3/aws-web-identity-token-file"); + Path webIdentityTokenFileSymlink = environment.configFile().resolve(WEB_IDENTITY_TOKEN_FILE_LOCATION); if (Files.exists(webIdentityTokenFileSymlink) == false) { LOGGER.warn( "Cannot use AWS Web Identity Tokens: AWS_WEB_IDENTITY_TOKEN_FILE is defined but no corresponding symlink exists " diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index 52fe152ba41e3..8e5f6634372db 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -107,10 +107,9 @@ public void testRepositoryCredentialsOverrideSecureCredentials() { assertThat(credentials.getAWSSecretKey(), is("insecure_aws_secret")); assertCriticalWarnings( + "[access_key] setting was deprecated in Elasticsearch and will be removed in a future release.", "[secret_key] setting was deprecated in Elasticsearch and will be removed in a future release.", - "Using s3 access/secret key from repository settings. Instead store these in named clients and" - + " the elasticsearch keystore for secure settings.", - "[access_key] setting was deprecated in Elasticsearch and will be removed in a future release."
+ S3Repository.INSECURE_CREDENTIALS_DEPRECATION_WARNING ); } @@ -194,10 +193,9 @@ public void testReinitSecureCredentials() { if (hasInsecureSettings) { assertCriticalWarnings( + "[access_key] setting was deprecated in Elasticsearch and will be removed in a future release.", "[secret_key] setting was deprecated in Elasticsearch and will be removed in a future release.", - "Using s3 access/secret key from repository settings. Instead store these in named clients and" - + " the elasticsearch keystore for secure settings.", - "[access_key] setting was deprecated in Elasticsearch and will be removed in a future release." + S3Repository.INSECURE_CREDENTIALS_DEPRECATION_WARNING ); } } @@ -238,10 +236,7 @@ public void sendResponse(RestResponse response) { throw error.get(); } - assertWarnings( - "Using s3 access/secret key from repository settings. Instead store these in named clients and" - + " the elasticsearch keystore for secure settings." - ); + assertWarnings(S3Repository.INSECURE_CREDENTIALS_DEPRECATION_WARNING); } private void createRepository(final String name, final Settings repositorySettings) { diff --git a/modules/repository-s3/src/test/resources/aws-web-identity-token-file b/modules/repository-s3/src/test/resources/aws-web-identity-token-file deleted file mode 100644 index 15cb29eac2ff6..0000000000000 --- a/modules/repository-s3/src/test/resources/aws-web-identity-token-file +++ /dev/null @@ -1 +0,0 @@ -Atza|IQEBLjAsAhRFiXuWpUXuRvQ9PZL3GMFcYevydwIUFAHZwXZXXXXXXXXJnrulxKDHwy87oGKPznh0D6bEQZTSCzyoCtL_8S07pLpr0zMbn6w1lfVZKNTBdDansFBmtGnIsIapjI6xKR02Yc_2bQ8LZbUXSGm6Ry6_BG7PrtLZtj_dfCTj92xNGed-CrKqjG7nPBjNIL016GGvuS5gSvPRUxWES3VYfm1wl7WTI7jn-Pcb6M-buCgHhFOzTQxod27L9CqnOLio7N3gZAGpsp6n1-AJBOCJckcyXe2c6uD0srOJeZlKUm2eTDVMf8IehDVI0r1QOnTV6KzzAI3OY87Vd_cVMQ diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java index 0ae8af0989fa6..3d34934e54945 100644 --- a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java +++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java @@ -10,8 +10,6 @@ package org.elasticsearch.repositories.s3; import fixture.s3.S3HttpFixture; -import fixture.s3.S3HttpFixtureWithEC2; -import fixture.s3.S3HttpFixtureWithSessionToken; import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; @@ -29,37 +27,29 @@ @ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 public class RepositoryS3ClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT { - public static final S3HttpFixture s3Fixture = new S3HttpFixture(); - public static final S3HttpFixtureWithSessionToken s3HttpFixtureWithSessionToken = new S3HttpFixtureWithSessionToken(); - public static final S3HttpFixtureWithEC2 s3Ec2 = new S3HttpFixtureWithEC2(); + private static final String ACCESS_KEY = "RepositoryS3ClientYamlTestSuiteIT-access-key"; + private static final String SECRET_KEY = "RepositoryS3ClientYamlTestSuiteIT-secret-key"; - private static final String s3TemporarySessionToken = "session_token"; + private static final S3HttpFixture s3Fixture = new S3HttpFixture( + true, + "bucket", + "base_path_integration_tests", + S3HttpFixture.fixedAccessKey(ACCESS_KEY) + 
); public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .module("repository-s3") - .keystore("s3.client.integration_test_permanent.access_key", System.getProperty("s3PermanentAccessKey")) - .keystore("s3.client.integration_test_permanent.secret_key", System.getProperty("s3PermanentSecretKey")) - .keystore("s3.client.integration_test_temporary.access_key", System.getProperty("s3TemporaryAccessKey")) - .keystore("s3.client.integration_test_temporary.secret_key", System.getProperty("s3TemporarySecretKey")) - .keystore("s3.client.integration_test_temporary.session_token", s3TemporarySessionToken) + .keystore("s3.client.integration_test_permanent.access_key", ACCESS_KEY) + .keystore("s3.client.integration_test_permanent.secret_key", SECRET_KEY) .setting("s3.client.integration_test_permanent.endpoint", s3Fixture::getAddress) - .setting("s3.client.integration_test_temporary.endpoint", s3HttpFixtureWithSessionToken::getAddress) - .setting("s3.client.integration_test_ec2.endpoint", s3Ec2::getAddress) - .systemProperty("com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", s3Ec2::getAddress) .build(); @ClassRule - public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(s3Ec2).around(s3HttpFixtureWithSessionToken).around(cluster); + public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(cluster); @ParametersFactory public static Iterable<Object[]> parameters() throws Exception { - return createParameters( - new String[] { - "repository_s3/10_basic", - "repository_s3/20_repository_permanent_credentials", - "repository_s3/30_repository_temporary_credentials", - "repository_s3/40_repository_ec2_credentials" } - ); + return createParameters(new String[] { "repository_s3/10_basic", "repository_s3/20_repository_permanent_credentials" }); } public RepositoryS3ClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsClientYamlTestSuiteIT.java deleted file mode 100644 index fa21797540c17..0000000000000 --- a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsClientYamlTestSuiteIT.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1".
- */ - - package org.elasticsearch.repositories.s3; - - import fixture.s3.S3HttpFixtureWithECS; - - import com.carrotsearch.randomizedtesting.annotations.Name; - import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - - import org.elasticsearch.test.cluster.ElasticsearchCluster; - import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; - import org.junit.ClassRule; - import org.junit.rules.RuleChain; - import org.junit.rules.TestRule; - - public class RepositoryS3EcsClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT { - private static final S3HttpFixtureWithECS s3Ecs = new S3HttpFixtureWithECS(); - - public static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .module("repository-s3") - .setting("s3.client.integration_test_ecs.endpoint", s3Ecs::getAddress) - .environment("AWS_CONTAINER_CREDENTIALS_FULL_URI", () -> (s3Ecs.getAddress() + "/ecs_credentials_endpoint")) - .build(); - - @ClassRule - public static TestRule ruleChain = RuleChain.outerRule(s3Ecs).around(cluster); - - @ParametersFactory - public static Iterable<Object[]> parameters() throws Exception { - return createParameters(new String[] { "repository_s3/50_repository_ecs_credentials" }); - } - - public RepositoryS3EcsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { - super(testCandidate); - } - - @Override - protected String getTestRestCluster() { - return cluster.getHttpAddresses(); - } -} diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RegionalStsClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RegionalStsClientYamlTestSuiteIT.java index 2baba66a8a4d0..ac356083983eb 100644 --- a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RegionalStsClientYamlTestSuiteIT.java +++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RegionalStsClientYamlTestSuiteIT.java @@ -21,10 +21,11 @@ public class RepositoryS3RegionalStsClientYamlTestSuiteIT extends AbstractReposi @ClassRule public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .module("repository-s3") - .configFile("repository-s3/aws-web-identity-token-file", Resource.fromClasspath("aws-web-identity-token-file")) - .environment("AWS_WEB_IDENTITY_TOKEN_FILE", System.getProperty("awsWebIdentityTokenExternalLocation")) - // The AWS STS SDK requires the role and session names to be set.
We can verify that they are sent to S3S in the - // S3HttpFixtureWithSTS fixture + .configFile(S3Service.CustomWebIdentityTokenCredentialsProvider.WEB_IDENTITY_TOKEN_FILE_LOCATION, Resource.fromString(""" + Atza|IQEBLjAsAhRFiXuWpUXuRvQ9PZL3GMFcYevydwIUFAHZwXZXXXXXXXXJnrulxKDHwy87oGKPznh0D6bEQZTSCzyoCtL_8S07pLpr0zMbn6w1lfVZKNTBdDans\ + FBmtGnIsIapjI6xKR02Yc_2bQ8LZbUXSGm6Ry6_BG7PrtLZtj_dfCTj92xNGed-CrKqjG7nPBjNIL016GGvuS5gSvPRUxWES3VYfm1wl7WTI7jn-Pcb6M-buCgHhFO\ + zTQxod27L9CqnOLio7N3gZAGpsp6n1-AJBOCJckcyXe2c6uD0srOJeZlKUm2eTDVMf8IehDVI0r1QOnTV6KzzAI3OY87Vd_cVMQ""")) + .environment("AWS_WEB_IDENTITY_TOKEN_FILE", S3Service.CustomWebIdentityTokenCredentialsProvider.WEB_IDENTITY_TOKEN_FILE_LOCATION) .environment("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/FederatedWebIdentityRole") .environment("AWS_ROLE_SESSION_NAME", "sts-fixture-test") .environment("AWS_STS_REGIONAL_ENDPOINTS", "regional") @@ -33,6 +34,9 @@ public class RepositoryS3RegionalStsClientYamlTestSuiteIT extends AbstractReposi @ParametersFactory public static Iterable<Object[]> parameters() throws Exception { + // Run just the basic sanity test to make sure ES starts up and loads the S3 repository with a regional endpoint without an error. + // It would be great to make actual requests against a test fixture, but setting the region means using a production endpoint. + // See #102230 for more details. return createParameters(new String[] { "repository_s3/10_basic" }); } diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsClientYamlTestSuiteIT.java deleted file mode 100644 index 24f03a6ae7624..0000000000000 --- a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsClientYamlTestSuiteIT.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1".
- */ - - package org.elasticsearch.repositories.s3; - - import fixture.s3.S3HttpFixture; - import fixture.s3.S3HttpFixtureWithSTS; - - import com.carrotsearch.randomizedtesting.annotations.Name; - import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - - import org.elasticsearch.test.cluster.ElasticsearchCluster; - import org.elasticsearch.test.cluster.util.resource.Resource; - import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; - import org.junit.ClassRule; - import org.junit.rules.RuleChain; - import org.junit.rules.TestRule; - - public class RepositoryS3StsClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT { - - public static final S3HttpFixture s3Fixture = new S3HttpFixture(); - private static final S3HttpFixtureWithSTS s3Sts = new S3HttpFixtureWithSTS(); - - public static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .module("repository-s3") - .setting("s3.client.integration_test_sts.endpoint", s3Sts::getAddress) - .systemProperty("com.amazonaws.sdk.stsMetadataServiceEndpointOverride", () -> s3Sts.getAddress() + "/assume-role-with-web-identity") - .configFile("repository-s3/aws-web-identity-token-file", Resource.fromClasspath("aws-web-identity-token-file")) - .environment("AWS_WEB_IDENTITY_TOKEN_FILE", System.getProperty("awsWebIdentityTokenExternalLocation")) - // // The AWS STS SDK requires the role and session names to be set. We can verify that they are sent to S3S in the - // // S3HttpFixtureWithSTS fixture - .environment("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/FederatedWebIdentityRole") - .environment("AWS_ROLE_SESSION_NAME", "sts-fixture-test") - .build(); - - @ClassRule - public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(s3Sts).around(cluster); - - @ParametersFactory - public static Iterable<Object[]> parameters() throws Exception { - return createParameters(new String[] { "repository_s3/60_repository_sts_credentials" }); - } - - public RepositoryS3StsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { - super(testCandidate); - } - - @Override - protected String getTestRestCluster() { - return cluster.getHttpAddresses(); - } -} diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml index e88a0861ec01c..6f6fdaed8c666 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml @@ -10,12 +10,11 @@ setup: body: type: s3 settings: - bucket: @permanent_bucket@ + bucket: bucket client: integration_test_permanent - base_path: "@permanent_base_path@" + base_path: base_path_integration_tests canned_acl: private storage_class: standard - disable_chunked_encoding: @disable_chunked_encoding@ # Remove the snapshots, if a previous test failed to delete them. This is # useful for third party tests that run the test against a real external service.
@@ -40,9 +39,9 @@ setup: body: type: s3 settings: - bucket: @permanent_bucket@ + bucket: bucket client: integration_test_permanent - base_path: "@permanent_base_path@" + base_path: base_path_integration_tests endpoint: 127.0.0.1:5 canned_acl: private storage_class: standard @@ -55,9 +54,9 @@ setup: body: type: s3 settings: - bucket: @permanent_bucket@ + bucket: bucket client: integration_test_permanent - base_path: "@permanent_base_path@" + base_path: base_path_integration_tests endpoint: 127.0.0.1:5 canned_acl: private storage_class: standard @@ -106,258 +105,6 @@ setup: - match: { snapshot.include_global_state: true } - match: { snapshot.shards.failed: 0 } ---- -"Snapshot and Restore with repository-s3 using permanent credentials": - - # Get repository - - do: - snapshot.get_repository: - repository: repository_permanent - - - match: { repository_permanent.settings.bucket : @permanent_bucket@ } - - match: { repository_permanent.settings.client : "integration_test_permanent" } - - match: { repository_permanent.settings.base_path : "@permanent_base_path@" } - - match: { repository_permanent.settings.canned_acl : "private" } - - match: { repository_permanent.settings.storage_class : "standard" } - - is_false: repository_permanent.settings.access_key - - is_false: repository_permanent.settings.secret_key - - is_false: repository_permanent.settings.session_token - - # Index documents - - do: - bulk: - refresh: true - body: - - index: - _index: docs - _id: "1" - - snapshot: one - - index: - _index: docs - _id: "2" - - snapshot: one - - index: - _index: docs - _id: "3" - - snapshot: one - - - do: - count: - index: docs - - - match: {count: 3} - - # Create a first snapshot - - do: - snapshot.create: - repository: repository_permanent - snapshot: snapshot-one - wait_for_completion: true - - - match: { snapshot.snapshot: snapshot-one } - - match: { snapshot.state : SUCCESS } - - match: { snapshot.include_global_state: true } - - match: { snapshot.shards.failed : 0 } - - - do: - snapshot.status: - repository: repository_permanent - snapshot: snapshot-one - - - is_true: snapshots - - match: { snapshots.0.snapshot: snapshot-one } - - match: { snapshots.0.state : SUCCESS } - - # Index more documents - - do: - bulk: - refresh: true - body: - - index: - _index: docs - _id: "4" - - snapshot: two - - index: - _index: docs - _id: "5" - - snapshot: two - - index: - _index: docs - _id: "6" - - snapshot: two - - index: - _index: docs - _id: "7" - - snapshot: two - - - do: - count: - index: docs - - - match: {count: 7} - - # Create a second snapshot - - do: - snapshot.create: - repository: repository_permanent - snapshot: snapshot-two - wait_for_completion: true - - - match: { snapshot.snapshot: snapshot-two } - - match: { snapshot.state : SUCCESS } - - match: { snapshot.shards.failed : 0 } - - - do: - snapshot.get: - repository: repository_permanent - snapshot: snapshot-one,snapshot-two - - - is_true: snapshots - - match: { snapshots.0.state : SUCCESS } - - match: { snapshots.1.state : SUCCESS } - - # Delete the index - - do: - indices.delete: - index: docs - - # Restore the second snapshot - - do: - snapshot.restore: - repository: repository_permanent - snapshot: snapshot-two - wait_for_completion: true - - - do: - count: - index: docs - - - match: {count: 7} - - # Delete the index again - - do: - indices.delete: - index: docs - - # Restore the first snapshot - - do: - snapshot.restore: - repository: repository_permanent - snapshot: snapshot-one - wait_for_completion: true - - - do: - count: - index: docs - 
- - match: {count: 3} - - # Remove the snapshots - - do: - snapshot.delete: - repository: repository_permanent - snapshot: snapshot-two - - - do: - snapshot.delete: - repository: repository_permanent - snapshot: snapshot-one - ---- -"Register a repository with a non existing bucket": - - - do: - catch: /repository_verification_exception/ - snapshot.create_repository: - repository: repository_permanent - body: - type: s3 - settings: - bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE - client: integration_test_permanent - ---- -"Register a repository with a non existing client": - - - do: - catch: /illegal_argument_exception/ - snapshot.create_repository: - repository: repository_permanent - body: - type: s3 - settings: - bucket: repository_permanent - client: unknown - ---- -"Register a read-only repository with a non existing bucket": - -- do: - catch: /repository_verification_exception/ - snapshot.create_repository: - repository: repository_permanent - body: - type: s3 - settings: - readonly: true - bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE - client: integration_test_permanent - ---- -"Register a read-only repository with a non existing client": - -- do: - catch: /illegal_argument_exception/ - snapshot.create_repository: - repository: repository_permanent - body: - type: s3 - settings: - readonly: true - bucket: repository_permanent - client: unknown - ---- -"Get a non existing snapshot": - - - do: - catch: /snapshot_missing_exception/ - snapshot.get: - repository: repository_permanent - snapshot: missing - ---- -"Delete a non existing snapshot": - - - do: - catch: /snapshot_missing_exception/ - snapshot.delete: - repository: repository_permanent - snapshot: missing - ---- -"Restore a non existing snapshot": - - - do: - catch: /snapshot_restore_exception/ - snapshot.restore: - repository: repository_permanent - snapshot: missing - wait_for_completion: true - ---- -"Usage stats": - - requires: - cluster_features: - - repositories.supports_usage_stats - reason: requires this feature - - - do: - cluster.stats: {} - - - gte: { repositories.s3.count: 1 } - - gte: { repositories.s3.read_write: 1 } - --- teardown: diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml deleted file mode 100644 index 501af980e17e3..0000000000000 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml +++ /dev/null @@ -1,278 +0,0 @@ -# Integration tests for repository-s3 - ---- -setup: - - # Register repository with temporary credentials - - do: - snapshot.create_repository: - repository: repository_temporary - body: - type: s3 - settings: - bucket: @temporary_bucket@ - client: integration_test_temporary - base_path: "@temporary_base_path@" - canned_acl: private - storage_class: standard - disable_chunked_encoding: @disable_chunked_encoding@ - ---- -"Snapshot and Restore with repository-s3 using temporary credentials": - - # Get repository - - do: - snapshot.get_repository: - repository: repository_temporary - - - match: { repository_temporary.settings.bucket : @temporary_bucket@ } - - match: { repository_temporary.settings.client : "integration_test_temporary" } - - match: { repository_temporary.settings.base_path : "@temporary_base_path@" } - - match: { repository_temporary.settings.canned_acl : "private" } - - 
match: { repository_temporary.settings.storage_class : "standard" } - - is_false: repository_temporary.settings.access_key - - is_false: repository_temporary.settings.secret_key - - is_false: repository_temporary.settings.session_token - - # Index documents - - do: - bulk: - refresh: true - body: - - index: - _index: docs - _id: "1" - - snapshot: one - - index: - _index: docs - _id: "2" - - snapshot: one - - index: - _index: docs - _id: "3" - - snapshot: one - - - do: - count: - index: docs - - - match: {count: 3} - - # Create a first snapshot - - do: - snapshot.create: - repository: repository_temporary - snapshot: snapshot-one - wait_for_completion: true - - - match: { snapshot.snapshot: snapshot-one } - - match: { snapshot.state : SUCCESS } - - match: { snapshot.include_global_state: true } - - match: { snapshot.shards.failed : 0 } - - - do: - snapshot.status: - repository: repository_temporary - snapshot: snapshot-one - - - is_true: snapshots - - match: { snapshots.0.snapshot: snapshot-one } - - match: { snapshots.0.state : SUCCESS } - - # Index more documents - - do: - bulk: - refresh: true - body: - - index: - _index: docs - _id: "4" - - snapshot: two - - index: - _index: docs - _id: "5" - - snapshot: two - - index: - _index: docs - _id: "6" - - snapshot: two - - index: - _index: docs - _id: "7" - - snapshot: two - - - do: - count: - index: docs - - - match: {count: 7} - - # Create a second snapshot - - do: - snapshot.create: - repository: repository_temporary - snapshot: snapshot-two - wait_for_completion: true - - - match: { snapshot.snapshot: snapshot-two } - - match: { snapshot.state : SUCCESS } - - match: { snapshot.shards.failed : 0 } - - - do: - snapshot.get: - repository: repository_temporary - snapshot: snapshot-one,snapshot-two - - - is_true: snapshots - - match: { snapshots.0.state : SUCCESS } - - match: { snapshots.1.state : SUCCESS } - - # Delete the index - - do: - indices.delete: - index: docs - - # Restore the second snapshot - - do: - snapshot.restore: - repository: repository_temporary - snapshot: snapshot-two - wait_for_completion: true - - - do: - count: - index: docs - - - match: {count: 7} - - # Delete the index again - - do: - indices.delete: - index: docs - - # Restore the first snapshot - - do: - snapshot.restore: - repository: repository_temporary - snapshot: snapshot-one - wait_for_completion: true - - - do: - count: - index: docs - - - match: {count: 3} - - # Remove the snapshots - - do: - snapshot.delete: - repository: repository_temporary - snapshot: snapshot-two - - - do: - snapshot.delete: - repository: repository_temporary - snapshot: snapshot-one - ---- -"Register a repository with a non existing bucket": - - - do: - catch: /repository_verification_exception/ - snapshot.create_repository: - repository: repository_temporary - body: - type: s3 - settings: - bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE - client: integration_test_temporary - ---- -"Register a repository with a non existing client": - - - do: - catch: /illegal_argument_exception/ - snapshot.create_repository: - repository: repository_temporary - body: - type: s3 - settings: - bucket: repository_temporary - client: unknown - ---- -"Register a read-only repository with a non existing bucket": - -- do: - catch: /repository_verification_exception/ - snapshot.create_repository: - repository: repository_temporary - body: - type: s3 - settings: - readonly: true - bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE - client: integration_test_temporary - ---- -"Register a 
read-only repository with a non existing client": - -- do: - catch: /illegal_argument_exception/ - snapshot.create_repository: - repository: repository_temporary - body: - type: s3 - settings: - readonly: true - bucket: repository_temporary - client: unknown - ---- -"Get a non existing snapshot": - - - do: - catch: /snapshot_missing_exception/ - snapshot.get: - repository: repository_temporary - snapshot: missing - ---- -"Delete a non existing snapshot": - - - do: - catch: /snapshot_missing_exception/ - snapshot.delete: - repository: repository_temporary - snapshot: missing - ---- -"Restore a non existing snapshot": - - - do: - catch: /snapshot_restore_exception/ - snapshot.restore: - repository: repository_temporary - snapshot: missing - wait_for_completion: true - ---- -"Usage stats": - - requires: - cluster_features: - - repositories.supports_usage_stats - reason: requires this feature - - - do: - cluster.stats: {} - - - gte: { repositories.s3.count: 1 } - - gte: { repositories.s3.read_write: 1 } - ---- -teardown: - - # Remove our repository - - do: - snapshot.delete_repository: - repository: repository_temporary diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml deleted file mode 100644 index 129f0ba5d7588..0000000000000 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml +++ /dev/null @@ -1,278 +0,0 @@ -# Integration tests for repository-s3 - ---- -setup: - - # Register repository with ec2 credentials - - do: - snapshot.create_repository: - repository: repository_ec2 - body: - type: s3 - settings: - bucket: @ec2_bucket@ - client: integration_test_ec2 - base_path: "@ec2_base_path@" - canned_acl: private - storage_class: standard - disable_chunked_encoding: @disable_chunked_encoding@ - ---- -"Snapshot and Restore with repository-s3 using ec2 credentials": - - # Get repository - - do: - snapshot.get_repository: - repository: repository_ec2 - - - match: { repository_ec2.settings.bucket : @ec2_bucket@ } - - match: { repository_ec2.settings.client : "integration_test_ec2" } - - match: { repository_ec2.settings.base_path : "@ec2_base_path@" } - - match: { repository_ec2.settings.canned_acl : "private" } - - match: { repository_ec2.settings.storage_class : "standard" } - - is_false: repository_ec2.settings.access_key - - is_false: repository_ec2.settings.secret_key - - is_false: repository_ec2.settings.session_token - - # Index documents - - do: - bulk: - refresh: true - body: - - index: - _index: docs - _id: "1" - - snapshot: one - - index: - _index: docs - _id: "2" - - snapshot: one - - index: - _index: docs - _id: "3" - - snapshot: one - - - do: - count: - index: docs - - - match: {count: 3} - - # Create a first snapshot - - do: - snapshot.create: - repository: repository_ec2 - snapshot: snapshot-one - wait_for_completion: true - - - match: { snapshot.snapshot: snapshot-one } - - match: { snapshot.state : SUCCESS } - - match: { snapshot.include_global_state: true } - - match: { snapshot.shards.failed : 0 } - - - do: - snapshot.status: - repository: repository_ec2 - snapshot: snapshot-one - - - is_true: snapshots - - match: { snapshots.0.snapshot: snapshot-one } - - match: { snapshots.0.state : SUCCESS } - - # Index more documents - - do: - bulk: - refresh: true - body: - - index: - _index: docs - _id: "4" - - snapshot: two - - 
index: - _index: docs - _id: "5" - - snapshot: two - - index: - _index: docs - _id: "6" - - snapshot: two - - index: - _index: docs - _id: "7" - - snapshot: two - - - do: - count: - index: docs - - - match: {count: 7} - - # Create a second snapshot - - do: - snapshot.create: - repository: repository_ec2 - snapshot: snapshot-two - wait_for_completion: true - - - match: { snapshot.snapshot: snapshot-two } - - match: { snapshot.state : SUCCESS } - - match: { snapshot.shards.failed : 0 } - - - do: - snapshot.get: - repository: repository_ec2 - snapshot: snapshot-one,snapshot-two - - - is_true: snapshots - - match: { snapshots.0.state : SUCCESS } - - match: { snapshots.1.state : SUCCESS } - - # Delete the index - - do: - indices.delete: - index: docs - - # Restore the second snapshot - - do: - snapshot.restore: - repository: repository_ec2 - snapshot: snapshot-two - wait_for_completion: true - - - do: - count: - index: docs - - - match: {count: 7} - - # Delete the index again - - do: - indices.delete: - index: docs - - # Restore the first snapshot - - do: - snapshot.restore: - repository: repository_ec2 - snapshot: snapshot-one - wait_for_completion: true - - - do: - count: - index: docs - - - match: {count: 3} - - # Remove the snapshots - - do: - snapshot.delete: - repository: repository_ec2 - snapshot: snapshot-two - - - do: - snapshot.delete: - repository: repository_ec2 - snapshot: snapshot-one - ---- -"Register a repository with a non existing bucket": - - - do: - catch: /repository_verification_exception/ - snapshot.create_repository: - repository: repository_ec2 - body: - type: s3 - settings: - bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE - client: integration_test_temporary - ---- -"Register a repository with a non existing client": - - - do: - catch: /illegal_argument_exception/ - snapshot.create_repository: - repository: repository_ec2 - body: - type: s3 - settings: - bucket: repository_ec2 - client: unknown - ---- -"Register a read-only repository with a non existing bucket": - -- do: - catch: /repository_verification_exception/ - snapshot.create_repository: - repository: repository_ec2 - body: - type: s3 - settings: - readonly: true - bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE - client: integration_test_temporary - ---- -"Register a read-only repository with a non existing client": - -- do: - catch: /illegal_argument_exception/ - snapshot.create_repository: - repository: repository_ec2 - body: - type: s3 - settings: - readonly: true - bucket: repository_ec2 - client: unknown - ---- -"Get a non existing snapshot": - - - do: - catch: /snapshot_missing_exception/ - snapshot.get: - repository: repository_ec2 - snapshot: missing - ---- -"Delete a non existing snapshot": - - - do: - catch: /snapshot_missing_exception/ - snapshot.delete: - repository: repository_ec2 - snapshot: missing - ---- -"Restore a non existing snapshot": - - - do: - catch: /snapshot_restore_exception/ - snapshot.restore: - repository: repository_ec2 - snapshot: missing - wait_for_completion: true - ---- -"Usage stats": - - requires: - cluster_features: - - repositories.supports_usage_stats - reason: requires this feature - - - do: - cluster.stats: {} - - - gte: { repositories.s3.count: 1 } - - gte: { repositories.s3.read_write: 1 } - ---- -teardown: - - # Remove our repository - - do: - snapshot.delete_repository: - repository: repository_ec2 diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml 
b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml deleted file mode 100644 index de334b4b3df96..0000000000000 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml +++ /dev/null @@ -1,278 +0,0 @@ -# Integration tests for repository-s3 - ---- -setup: - - # Register repository with ecs credentials - - do: - snapshot.create_repository: - repository: repository_ecs - body: - type: s3 - settings: - bucket: @ecs_bucket@ - client: integration_test_ecs - base_path: "@ecs_base_path@" - canned_acl: private - storage_class: standard - disable_chunked_encoding: @disable_chunked_encoding@ - ---- -"Snapshot and Restore with repository-s3 using ecs credentials": - - # Get repository - - do: - snapshot.get_repository: - repository: repository_ecs - - - match: { repository_ecs.settings.bucket : @ecs_bucket@ } - - match: { repository_ecs.settings.client : "integration_test_ecs" } - - match: { repository_ecs.settings.base_path : "@ecs_base_path@" } - - match: { repository_ecs.settings.canned_acl : "private" } - - match: { repository_ecs.settings.storage_class : "standard" } - - is_false: repository_ecs.settings.access_key - - is_false: repository_ecs.settings.secret_key - - is_false: repository_ecs.settings.session_token - - # Index documents - - do: - bulk: - refresh: true - body: - - index: - _index: docs - _id: "1" - - snapshot: one - - index: - _index: docs - _id: "2" - - snapshot: one - - index: - _index: docs - _id: "3" - - snapshot: one - - - do: - count: - index: docs - - - match: {count: 3} - - # Create a first snapshot - - do: - snapshot.create: - repository: repository_ecs - snapshot: snapshot-one - wait_for_completion: true - - - match: { snapshot.snapshot: snapshot-one } - - match: { snapshot.state : SUCCESS } - - match: { snapshot.include_global_state: true } - - match: { snapshot.shards.failed : 0 } - - - do: - snapshot.status: - repository: repository_ecs - snapshot: snapshot-one - - - is_true: snapshots - - match: { snapshots.0.snapshot: snapshot-one } - - match: { snapshots.0.state : SUCCESS } - - # Index more documents - - do: - bulk: - refresh: true - body: - - index: - _index: docs - _id: "4" - - snapshot: two - - index: - _index: docs - _id: "5" - - snapshot: two - - index: - _index: docs - _id: "6" - - snapshot: two - - index: - _index: docs - _id: "7" - - snapshot: two - - - do: - count: - index: docs - - - match: {count: 7} - - # Create a second snapshot - - do: - snapshot.create: - repository: repository_ecs - snapshot: snapshot-two - wait_for_completion: true - - - match: { snapshot.snapshot: snapshot-two } - - match: { snapshot.state : SUCCESS } - - match: { snapshot.shards.failed : 0 } - - - do: - snapshot.get: - repository: repository_ecs - snapshot: snapshot-one,snapshot-two - - - is_true: snapshots - - match: { snapshots.0.state : SUCCESS } - - match: { snapshots.1.state : SUCCESS } - - # Delete the index - - do: - indices.delete: - index: docs - - # Restore the second snapshot - - do: - snapshot.restore: - repository: repository_ecs - snapshot: snapshot-two - wait_for_completion: true - - - do: - count: - index: docs - - - match: {count: 7} - - # Delete the index again - - do: - indices.delete: - index: docs - - # Restore the first snapshot - - do: - snapshot.restore: - repository: repository_ecs - snapshot: snapshot-one - wait_for_completion: true - - - do: - count: - index: docs - - - match: {count: 3} - - # Remove the snapshots - - do: - 
snapshot.delete: - repository: repository_ecs - snapshot: snapshot-two - - - do: - snapshot.delete: - repository: repository_ecs - snapshot: snapshot-one - ---- -"Register a repository with a non existing bucket": - - - do: - catch: /repository_verification_exception/ - snapshot.create_repository: - repository: repository_ecs - body: - type: s3 - settings: - bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE - client: integration_test_ecs - ---- -"Register a repository with a non existing client": - - - do: - catch: /illegal_argument_exception/ - snapshot.create_repository: - repository: repository_ecs - body: - type: s3 - settings: - bucket: repository_ecs - client: unknown - ---- -"Register a read-only repository with a non existing bucket": - -- do: - catch: /repository_verification_exception/ - snapshot.create_repository: - repository: repository_ecs - body: - type: s3 - settings: - readonly: true - bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE - client: integration_test_ecs - ---- -"Register a read-only repository with a non existing client": - -- do: - catch: /illegal_argument_exception/ - snapshot.create_repository: - repository: repository_ecs - body: - type: s3 - settings: - readonly: true - bucket: repository_ecs - client: unknown - ---- -"Get a non existing snapshot": - - - do: - catch: /snapshot_missing_exception/ - snapshot.get: - repository: repository_ecs - snapshot: missing - ---- -"Delete a non existing snapshot": - - - do: - catch: /snapshot_missing_exception/ - snapshot.delete: - repository: repository_ecs - snapshot: missing - ---- -"Restore a non existing snapshot": - - - do: - catch: /snapshot_restore_exception/ - snapshot.restore: - repository: repository_ecs - snapshot: missing - wait_for_completion: true - ---- -"Usage stats": - - requires: - cluster_features: - - repositories.supports_usage_stats - reason: requires this feature - - - do: - cluster.stats: {} - - - gte: { repositories.s3.count: 1 } - - gte: { repositories.s3.read_write: 1 } - ---- -teardown: - - # Remove our repository - - do: - snapshot.delete_repository: - repository: repository_ecs diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml deleted file mode 100644 index 09a8526017960..0000000000000 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml +++ /dev/null @@ -1,279 +0,0 @@ -# Integration tests for repository-s3 - ---- -setup: - - # Register repository with sts credentials - - do: - snapshot.create_repository: - repository: repository_sts - body: - type: s3 - settings: - bucket: @sts_bucket@ - client: integration_test_sts - base_path: "@sts_base_path@" - canned_acl: private - storage_class: standard - disable_chunked_encoding: @disable_chunked_encoding@ - ---- -"Snapshot and Restore repository-s3 using sts credentials": - - # Get repository - - do: - snapshot.get_repository: - repository: repository_sts - - - match: { repository_sts.settings.bucket: @sts_bucket@ } - - match: { repository_sts.settings.client: "integration_test_sts" } - - match: { repository_sts.settings.base_path: "@sts_base_path@" } - - match: { repository_sts.settings.canned_acl: "private" } - - match: { repository_sts.settings.storage_class: "standard" } - - is_false: repository_sts.settings.access_key - - is_false: 
repository_sts.settings.secret_key - - is_false: repository_sts.settings.session_token - - # Index documents - - do: - bulk: - refresh: true - body: - - index: - _index: docs - _id: 1 - - snapshot: one - - index: - _index: docs - _id: 2 - - snapshot: one - - index: - _index: docs - _id: 3 - - snapshot: one - - - do: - count: - index: docs - - - match: { count: 3 } - - # Create a first snapshot - - do: - snapshot.create: - repository: repository_sts - snapshot: snapshot-one - wait_for_completion: true - - - match: { snapshot.snapshot: snapshot-one } - - match: { snapshot.state: SUCCESS } - - match: { snapshot.include_global_state: true } - - match: { snapshot.shards.failed: 0 } - - - do: - snapshot.status: - repository: repository_sts - snapshot: snapshot-one - - - is_true: snapshots - - match: { snapshots.0.snapshot: snapshot-one } - - match: { snapshots.0.state: SUCCESS } - - # Index more documents - - do: - bulk: - refresh: true - body: - - index: - _index: docs - _id: 4 - - snapshot: two - - index: - _index: docs - _id: 5 - - snapshot: two - - index: - _index: docs - _id: 6 - - snapshot: two - - index: - _index: docs - _id: 7 - - snapshot: two - - - do: - count: - index: docs - - - match: { count: 7 } - - # Create a second snapshot - - do: - snapshot.create: - repository: repository_sts - snapshot: snapshot-two - wait_for_completion: true - - - match: { snapshot.snapshot: snapshot-two } - - match: { snapshot.state: SUCCESS } - - match: { snapshot.shards.failed: 0 } - - - do: - snapshot.get: - repository: repository_sts - snapshot: snapshot-one,snapshot-two - - - is_true: snapshots - - match: { snapshots.0.state: SUCCESS } - - match: { snapshots.1.state: SUCCESS } - - # Delete the index - - do: - indices.delete: - index: docs - - # Restore the second snapshot - - do: - snapshot.restore: - repository: repository_sts - snapshot: snapshot-two - wait_for_completion: true - - - do: - count: - index: docs - - - match: { count: 7 } - - # Delete the index again - - do: - indices.delete: - index: docs - - # Restore the first snapshot - - do: - snapshot.restore: - repository: repository_sts - snapshot: snapshot-one - wait_for_completion: true - - - do: - count: - index: docs - - - match: { count: 3 } - - # Remove the snapshots - - do: - snapshot.delete: - repository: repository_sts - snapshot: snapshot-two - - - do: - snapshot.delete: - repository: repository_sts - snapshot: snapshot-one - ---- - -"Register a repository with a non existing bucket": - - - do: - catch: /repository_verification_exception/ - snapshot.create_repository: - repository: repository_sts - body: - type: s3 - settings: - bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE - client: integration_test_sts - ---- -"Register a repository with a non existing client": - - - do: - catch: /illegal_argument_exception/ - snapshot.create_repository: - repository: repository_sts - body: - type: s3 - settings: - bucket: repository_sts - client: unknown - ---- -"Register a read-only repository with a non existing bucket": - - - do: - catch: /repository_verification_exception/ - snapshot.create_repository: - repository: repository_sts - body: - type: s3 - settings: - readonly: true - bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE - client: integration_test_sts - ---- -"Register a read-only repository with a non existing client": - - - do: - catch: /illegal_argument_exception/ - snapshot.create_repository: - repository: repository_sts - body: - type: s3 - settings: - readonly: true - bucket: repository_sts - client: unknown 
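For reference, each of these negative registration checks maps directly onto the low-level Java REST client used elsewhere in the test suite. Below is a minimal sketch of the read-only unknown-client case, assuming a test-provided `RestClient`; the helper name is hypothetical and this is purely illustrative, not part of the change:

```java
import org.elasticsearch.client.Request;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;

// Registering an S3 repository whose client name is not configured should fail
// with an illegal_argument_exception, mirroring the YAML test above.
static void assertUnknownClientRejected(RestClient restClient) throws Exception {
    Request request = new Request("PUT", "/_snapshot/repository_sts");
    request.setJsonEntity("""
        {
          "type": "s3",
          "settings": { "readonly": true, "bucket": "repository_sts", "client": "unknown" }
        }
        """);
    try {
        restClient.performRequest(request);
        throw new AssertionError("expected repository registration to fail");
    } catch (ResponseException e) {
        // the error body names the exception type that the YAML test matched with a regex
        if (e.getMessage().contains("illegal_argument_exception") == false) {
            throw e;
        }
    }
}
```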
- ---- -"Get a non existing snapshot": - - - do: - catch: /snapshot_missing_exception/ - snapshot.get: - repository: repository_sts - snapshot: missing - ---- -"Delete a non existing snapshot": - - - do: - catch: /snapshot_missing_exception/ - snapshot.delete: - repository: repository_sts - snapshot: missing - ---- -"Restore a non existing snapshot": - - - do: - catch: /snapshot_restore_exception/ - snapshot.restore: - repository: repository_sts - snapshot: missing - wait_for_completion: true - ---- -"Usage stats": - - requires: - cluster_features: - - repositories.supports_usage_stats - reason: requires this feature - - - do: - cluster.stats: {} - - - gte: { repositories.s3.count: 1 } - - gte: { repositories.s3.read_write: 1 } - ---- -teardown: - - # Remove our repository - - do: - snapshot.delete_repository: - repository: repository_sts diff --git a/modules/rest-root/build.gradle b/modules/rest-root/build.gradle index 05a545a1ed671..adb8aeb02863f 100644 --- a/modules/rest-root/build.gradle +++ b/modules/rest-root/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.internal.info.BuildParams - /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the "Elastic License diff --git a/modules/runtime-fields-common/build.gradle b/modules/runtime-fields-common/build.gradle index e743939cbf79e..e8e06f0a9c4c7 100644 --- a/modules/runtime-fields-common/build.gradle +++ b/modules/runtime-fields-common/build.gradle @@ -22,7 +22,3 @@ dependencies { api project(':libs:grok') api project(':libs:dissect') } - -tasks.named("yamlRestCompatTestTransform").configure({ task -> - task.skipTestsByFilePattern("**/runtime_fields/110_composite.yml", "warning does not exist for compatibility") -}) diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java index 647d38c626c74..4bb27af4bd0f5 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java @@ -174,7 +174,7 @@ public void testClientConnectionCloseMidStream() throws Exception { // await stream handler is ready and request full content var handler = ctx.awaitRestChannelAccepted(opaqueId); - assertBusy(() -> assertNotNull(handler.stream.buf())); + assertBusy(() -> assertNotEquals(0, handler.stream.bufSize())); assertFalse(handler.streamClosed); @@ -187,7 +187,7 @@ public void testClientConnectionCloseMidStream() throws Exception { // wait for resources to be released assertBusy(() -> { - assertNull(handler.stream.buf()); + assertEquals(0, handler.stream.bufSize()); assertTrue(handler.streamClosed); }); } @@ -204,13 +204,13 @@ public void testServerCloseConnectionMidStream() throws Exception { // await stream handler is ready and request full content var handler = ctx.awaitRestChannelAccepted(opaqueId); - assertBusy(() -> assertNotNull(handler.stream.buf())); + assertBusy(() -> assertNotEquals(0, handler.stream.bufSize())); assertFalse(handler.streamClosed); // terminate connection on server and wait resources are released handler.channel.request().getHttpChannel().close(); assertBusy(() -> { - assertNull(handler.stream.buf()); + assertEquals(0, 
handler.stream.bufSize()); assertTrue(handler.streamClosed); }); } @@ -226,14 +226,14 @@ public void testServerExceptionMidStream() throws Exception { // await stream handler is ready and request full content var handler = ctx.awaitRestChannelAccepted(opaqueId); - assertBusy(() -> assertNotNull(handler.stream.buf())); + assertBusy(() -> assertNotEquals(0, handler.stream.bufSize())); assertFalse(handler.streamClosed); handler.shouldThrowInsideHandleChunk = true; handler.stream.next(); assertBusy(() -> { - assertNull(handler.stream.buf()); + assertEquals(0, handler.stream.bufSize()); assertTrue(handler.streamClosed); }); } @@ -699,11 +699,6 @@ public Collection getRestHandlers( Predicate clusterSupportsFeature ) { return List.of(new BaseRestHandler() { - @Override - public boolean allowsUnsafeBuffers() { - return true; - } - @Override public String getName() { return ROUTE; diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4TrashingAllocatorIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4TrashingAllocatorIT.java new file mode 100644 index 0000000000000..f3a10ce228117 --- /dev/null +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4TrashingAllocatorIT.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.http.netty4; + +import io.netty.handler.codec.http.HttpResponseStatus; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.ESNetty4IntegTestCase; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.function.Predicate; +import java.util.function.Supplier; + +public class Netty4TrashingAllocatorIT extends ESNetty4IntegTestCase { + + @Override + protected Collection> nodePlugins() { + return CollectionUtils.concatLists(List.of(Handler.class), super.nodePlugins()); + } + + @Override + protected boolean addMockHttpTransport() { + return false; + } + + public void testTrashContent() throws InterruptedException { + try (var client = new Netty4HttpClient()) { + var addr = randomFrom(internalCluster().getInstance(HttpServerTransport.class).boundAddress().boundAddresses()).address(); + var content = randomAlphaOfLength(between(1024, 2048)); + var responses = client.post(addr, List.of(new Tuple<>(Handler.ROUTE, content))); + assertEquals(HttpResponseStatus.OK, responses.stream().findFirst().get().status()); + } + } + + public static class Handler extends Plugin implements ActionPlugin { + static final String ROUTE = "/_test/trashing-alloc"; + + @Override + public Collection getRestHandlers( + Settings settings, + NamedWriteableRegistry namedWriteableRegistry, + RestController restController, + ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, + SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster, + Predicate clusterSupportsFeature + ) { + return List.of(new BaseRestHandler() { + @Override + public String getName() { + return ROUTE; + } + + @Override + public List routes() { + return List.of(new Route(RestRequest.Method.POST, ROUTE)); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + var content = request.content(); + var iter = content.iterator(); + return (chan) -> { + request.getHttpRequest().release(); + assertFalse(content.hasReferences()); + BytesRef br; + while ((br = iter.next()) != null) { + for (int i = br.offset; i < br.offset + br.length; i++) { + if (br.bytes[i] != 0) { + fail( + new AssertionError( + "buffer is not trashed, off=" + + br.offset + + " len=" + + br.length + + " pos=" + + i + + " ind=" + + (i - br.offset) + ) + ); + } + } + } + chan.sendResponse(new RestResponse(RestStatus.OK, "")); + }; + } 
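+            // Note: the consumer above releases the request before scanning its content. With the +            // assertions-only TrashingByteBufAllocator installed, every byte of a deallocated buffer +            // must have been zeroed out, so any non-zero byte left behind fails the test.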
+ }); + } + } +} diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java index a1aa211814520..2662ddf7e1440 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java @@ -9,7 +9,6 @@ package org.elasticsearch.http.netty4; -import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.EmptyHttpHeaders; @@ -128,39 +127,6 @@ public void release() { } } - @Override - public HttpRequest releaseAndCopy() { - assert released.get() == false; - if (pooled == false) { - return this; - } - try { - final ByteBuf copiedContent = Unpooled.copiedBuffer(request.content()); - HttpBody newContent; - if (content.isStream()) { - newContent = content; - } else { - newContent = Netty4Utils.fullHttpBodyFrom(copiedContent); - } - return new Netty4HttpRequest( - sequence, - new DefaultFullHttpRequest( - request.protocolVersion(), - request.method(), - request.uri(), - copiedContent, - request.headers(), - request.trailingHeaders() - ), - new AtomicBoolean(false), - false, - newContent - ); - } finally { - release(); - } - } - @Override public final Map> getHeaders() { return headers; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java index 238faa7a9237e..ac3e3aecf97b9 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java @@ -37,12 +37,15 @@ public class Netty4HttpRequestBodyStream implements HttpBody.Stream { private final List tracingHandlers = new ArrayList<>(4); private final ThreadContext threadContext; private ByteBuf buf; - private boolean hasLast = false; private boolean requested = false; private boolean closing = false; private HttpBody.ChunkHandler handler; private ThreadContext.StoredContext requestContext; + // used in tests + private volatile int bufSize = 0; + private volatile boolean hasLast = false; + public Netty4HttpRequestBodyStream(Channel channel, ThreadContext threadContext) { this.channel = channel; this.threadContext = threadContext; @@ -112,11 +115,12 @@ private void addChunk(ByteBuf chunk) { comp.addComponent(true, chunk); buf = comp; } + bufSize = buf.readableBytes(); } // visible for test - ByteBuf buf() { - return buf; + int bufSize() { + return bufSize; } // visible for test @@ -130,6 +134,7 @@ private void send() { var bytesRef = Netty4Utils.toReleasableBytesReference(buf); requested = false; buf = null; + bufSize = 0; try (var ignored = threadContext.restoreExistingContext(requestContext)) { for (var tracer : tracingHandlers) { tracer.onNext(bytesRef, hasLast); @@ -164,6 +169,7 @@ private void doClose() { if (buf != null) { buf.release(); buf = null; + bufSize = 0; } channel.config().setAutoRead(true); } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index b971a52b7afb6..36c860f1fb90b 100644 --- 
a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -33,6 +33,7 @@ import io.netty.handler.timeout.ReadTimeoutException; import io.netty.handler.timeout.ReadTimeoutHandler; import io.netty.util.AttributeKey; +import io.netty.util.ResourceLeakDetector; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -410,6 +411,9 @@ protected Result beginEncode(HttpResponse httpResponse, String acceptEncoding) t } }); } + if (ResourceLeakDetector.isEnabled()) { + ch.pipeline().addLast(new Netty4LeakDetectionHandler()); + } ch.pipeline() .addLast( "pipelining", diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4LeakDetectionHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4LeakDetectionHandler.java new file mode 100644 index 0000000000000..8a0274872e493 --- /dev/null +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4LeakDetectionHandler.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.http.netty4; + +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpRequest; + +import org.elasticsearch.tasks.Task; + +/** + * Inbound channel handler that enriches buffer leak reports with information from the HTTP request. + * It helps to detect which handler is leaking buffers, especially in integration tests that run with + * the paranoid leak detector, which samples all buffers for leak tracking. Supplying an informative + * opaque-id in an integ test helps to narrow the problem down (for example, to the test name).
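+ * The handler is added to the pipeline only when {@code ResourceLeakDetector.isEnabled()} (see the + * server transport change above), so channels pay no extra cost when leak detection is off.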
+ */ +public class Netty4LeakDetectionHandler extends ChannelInboundHandlerAdapter { + + private String info; + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) { + if (msg instanceof HttpRequest request) { + var opaqueId = request.headers().get(Task.X_OPAQUE_ID_HTTP_HEADER); + info = "method: " + request.method() + "; uri: " + request.uri() + "; x-opaque-id: " + opaqueId; + } + if (msg instanceof HttpContent content) { + content.touch(info); + } + ctx.fireChannelRead(msg); + } +} diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java index 459b6c77be8c3..81b4fd3fbb9ee 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Utils.java @@ -179,7 +179,7 @@ public boolean hasReferences() { } public static HttpBody.Full fullHttpBodyFrom(final ByteBuf buf) { - return new HttpBody.ByteRefHttpBody(toBytesReference(buf)); + return new HttpBody.ByteRefHttpBody(toReleasableBytesReference(buf)); } public static Recycler createRecycler(Settings settings) { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyAllocator.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyAllocator.java index ab38b5f0c4c8c..e8bd5514947d6 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyAllocator.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyAllocator.java @@ -24,9 +24,11 @@ import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.core.Assertions; import org.elasticsearch.core.Booleans; import org.elasticsearch.monitor.jvm.JvmInfo; +import java.util.Arrays; import java.util.concurrent.atomic.AtomicBoolean; public class NettyAllocator { @@ -44,8 +46,9 @@ public class NettyAllocator { private static final String USE_NETTY_DEFAULT_CHUNK = "es.unsafe.use_netty_default_chunk_and_page_size"; static { + ByteBufAllocator allocator; if (Booleans.parseBoolean(System.getProperty(USE_NETTY_DEFAULT), false)) { - ALLOCATOR = ByteBufAllocator.DEFAULT; + allocator = ByteBufAllocator.DEFAULT; SUGGESTED_MAX_ALLOCATION_SIZE = 1024 * 1024; DESCRIPTION = "[name=netty_default, suggested_max_allocation_size=" + ByteSizeValue.ofBytes(SUGGESTED_MAX_ALLOCATION_SIZE) @@ -127,7 +130,12 @@ public class NettyAllocator { + g1gcRegionSize + "}]"; } - ALLOCATOR = new NoDirectBuffers(delegate); + allocator = new NoDirectBuffers(delegate); + } + if (Assertions.ENABLED) { + ALLOCATOR = new TrashingByteBufAllocator(allocator); + } else { + ALLOCATOR = allocator; } RECYCLER = new Recycler<>() { @@ -353,4 +361,62 @@ public ByteBufAllocator getDelegate() { return delegate; } } + + static class TrashingCompositeByteBuf extends CompositeByteBuf { + + TrashingCompositeByteBuf(ByteBufAllocator alloc, boolean direct, int maxNumComponents) { + super(alloc, direct, maxNumComponents); + } + + @Override + protected void deallocate() { + TrashingByteBufAllocator.trashBuffer(this); + super.deallocate(); + } + } + + static class TrashingByteBufAllocator extends NoDirectBuffers { + + static int DEFAULT_MAX_COMPONENTS = 16; + + static void trashBuffer(ByteBuf buf) { + for (var nioBuf : 
buf.nioBuffers()) { + if (nioBuf.hasArray()) { + var from = nioBuf.arrayOffset() + nioBuf.position(); + var to = from + nioBuf.remaining(); + Arrays.fill(nioBuf.array(), from, to, (byte) 0); + } + } + } + + TrashingByteBufAllocator(ByteBufAllocator delegate) { + super(delegate); + } + + @Override + public ByteBuf heapBuffer() { + return new TrashingByteBuf(super.heapBuffer()); + } + + @Override + public ByteBuf heapBuffer(int initialCapacity) { + return new TrashingByteBuf(super.heapBuffer(initialCapacity)); + } + + @Override + public ByteBuf heapBuffer(int initialCapacity, int maxCapacity) { + return new TrashingByteBuf(super.heapBuffer(initialCapacity, maxCapacity)); + } + + @Override + public CompositeByteBuf compositeHeapBuffer() { + return new TrashingCompositeByteBuf(this, false, DEFAULT_MAX_COMPONENTS); + } + + @Override + public CompositeByteBuf compositeHeapBuffer(int maxNumComponents) { + return new TrashingCompositeByteBuf(this, false, maxNumComponents); + } + + } } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/TrashingByteBuf.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/TrashingByteBuf.java new file mode 100644 index 0000000000000..ead0d595f0105 --- /dev/null +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/TrashingByteBuf.java @@ -0,0 +1,536 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.transport.netty4; + +import io.netty.buffer.ByteBuf; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +class TrashingByteBuf extends WrappedByteBuf { + + private boolean trashed = false; + + protected TrashingByteBuf(ByteBuf buf) { + super(buf); + } + + static TrashingByteBuf newBuf(ByteBuf buf) { + return new TrashingByteBuf(buf); + } + + @Override + public boolean release() { + if (refCnt() == 1) { + // see [NOTE on racy trashContent() calls] + trashContent(); + } + return super.release(); + } + + @Override + public boolean release(int decrement) { + if (refCnt() == decrement && refCnt() > 0) { + // see [NOTE on racy trashContent() calls] + trashContent(); + } + return super.release(decrement); + } + + // [NOTE on racy trashContent() calls]: We trash the buffer content _before_ reducing the ref + // count to zero, which looks racy because in principle a concurrent caller could come along + // and successfully retain() this buffer to keep it alive after it's been trashed. Such a + // caller would sometimes get an IllegalReferenceCountException ofc but that's something it + // could handle - see for instance org.elasticsearch.transport.netty4.Netty4Utils.ByteBufRefCounted.tryIncRef. + // Yet in practice this should never happen, we only ever retain() these buffers while we + // know them to be alive (i.e. via RefCounted#mustIncRef or its moral equivalents) so it'd + // be a bug for a caller to retain() a buffer whose ref count is heading to zero and whose + // contents we've already decided to trash. 
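+    // Zeroes the wrapped buffer's readable memory at most once. Note that trashBuffer only overwrites +    // regions backed by a byte[] (heap memory); regions without a backing array are skipped, which is +    // fine here since the allocator extends NoDirectBuffers.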
+ private void trashContent() { + if (trashed == false) { + trashed = true; + NettyAllocator.TrashingByteBufAllocator.trashBuffer(buf); + } + } + + @Override + public ByteBuf capacity(int newCapacity) { + super.capacity(newCapacity); + return this; + } + + @Override + public ByteBuf order(ByteOrder endianness) { + return newBuf(super.order(endianness)); + } + + @Override + public ByteBuf asReadOnly() { + return newBuf(super.asReadOnly()); + } + + @Override + public ByteBuf setIndex(int readerIndex, int writerIndex) { + super.setIndex(readerIndex, writerIndex); + return this; + } + + @Override + public ByteBuf discardReadBytes() { + super.discardReadBytes(); + return this; + } + + @Override + public ByteBuf discardSomeReadBytes() { + super.discardSomeReadBytes(); + return this; + } + + @Override + public ByteBuf ensureWritable(int minWritableBytes) { + super.ensureWritable(minWritableBytes); + return this; + } + + @Override + public ByteBuf getBytes(int index, ByteBuf dst) { + super.getBytes(index, dst); + return this; + } + + @Override + public ByteBuf getBytes(int index, ByteBuf dst, int length) { + super.getBytes(index, dst, length); + return this; + } + + @Override + public ByteBuf getBytes(int index, ByteBuf dst, int dstIndex, int length) { + super.getBytes(index, dst, dstIndex, length); + return this; + } + + @Override + public ByteBuf getBytes(int index, byte[] dst) { + super.getBytes(index, dst); + return this; + } + + @Override + public ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) { + super.getBytes(index, dst, dstIndex, length); + return this; + } + + @Override + public ByteBuf getBytes(int index, ByteBuffer dst) { + super.getBytes(index, dst); + return this; + } + + @Override + public ByteBuf getBytes(int index, OutputStream out, int length) throws IOException { + super.getBytes(index, out, length); + return this; + } + + @Override + public ByteBuf setBoolean(int index, boolean value) { + super.setBoolean(index, value); + return this; + } + + @Override + public ByteBuf setByte(int index, int value) { + super.setByte(index, value); + return this; + } + + @Override + public ByteBuf setShort(int index, int value) { + super.setShort(index, value); + return this; + } + + @Override + public ByteBuf setShortLE(int index, int value) { + super.setShortLE(index, value); + return this; + } + + @Override + public ByteBuf setMedium(int index, int value) { + super.setMedium(index, value); + return this; + } + + @Override + public ByteBuf setMediumLE(int index, int value) { + super.setMediumLE(index, value); + return this; + } + + @Override + public ByteBuf setInt(int index, int value) { + super.setInt(index, value); + return this; + } + + @Override + public ByteBuf setIntLE(int index, int value) { + super.setIntLE(index, value); + return this; + } + + @Override + public ByteBuf setLong(int index, long value) { + super.setLong(index, value); + return this; + } + + @Override + public ByteBuf setLongLE(int index, long value) { + super.setLongLE(index, value); + return this; + } + + @Override + public ByteBuf setChar(int index, int value) { + super.setChar(index, value); + return this; + } + + @Override + public ByteBuf setFloat(int index, float value) { + super.setFloat(index, value); + return this; + } + + @Override + public ByteBuf setDouble(int index, double value) { + super.setDouble(index, value); + return this; + } + + @Override + public ByteBuf setBytes(int index, ByteBuf src) { + super.setBytes(index, src); + return this; + } + + @Override + public ByteBuf 
setBytes(int index, ByteBuf src, int length) { + super.setBytes(index, src, length); + return this; + } + + @Override + public ByteBuf setBytes(int index, ByteBuf src, int srcIndex, int length) { + super.setBytes(index, src, srcIndex, length); + return this; + } + + @Override + public ByteBuf setBytes(int index, byte[] src) { + super.setBytes(index, src); + return this; + } + + @Override + public ByteBuf setBytes(int index, byte[] src, int srcIndex, int length) { + super.setBytes(index, src, srcIndex, length); + return this; + } + + @Override + public ByteBuf setBytes(int index, ByteBuffer src) { + super.setBytes(index, src); + return this; + } + + @Override + public ByteBuf readBytes(int length) { + return newBuf(super.readBytes(length)); + } + + @Override + public ByteBuf readSlice(int length) { + return newBuf(super.readSlice(length)); + } + + @Override + public ByteBuf readRetainedSlice(int length) { + return newBuf(super.readRetainedSlice(length)); + } + + @Override + public ByteBuf readBytes(ByteBuf dst) { + super.readBytes(dst); + return this; + } + + @Override + public ByteBuf readBytes(ByteBuf dst, int length) { + super.readBytes(dst, length); + return this; + } + + @Override + public ByteBuf readBytes(ByteBuf dst, int dstIndex, int length) { + super.readBytes(dst, dstIndex, length); + return this; + } + + @Override + public ByteBuf readBytes(byte[] dst) { + super.readBytes(dst); + return this; + } + + @Override + public ByteBuf readBytes(ByteBuffer dst) { + super.readBytes(dst); + return this; + } + + @Override + public ByteBuf readBytes(byte[] dst, int dstIndex, int length) { + super.readBytes(dst, dstIndex, length); + return this; + } + + @Override + public ByteBuf readBytes(OutputStream out, int length) throws IOException { + super.readBytes(out, length); + return this; + } + + @Override + public ByteBuf skipBytes(int length) { + super.skipBytes(length); + return this; + } + + @Override + public ByteBuf writeBoolean(boolean value) { + super.writeBoolean(value); + return this; + } + + @Override + public ByteBuf writeByte(int value) { + super.writeByte(value); + return this; + } + + @Override + public ByteBuf writeShort(int value) { + super.writeShort(value); + return this; + } + + @Override + public ByteBuf writeShortLE(int value) { + super.writeShortLE(value); + return this; + } + + @Override + public ByteBuf writeMedium(int value) { + super.writeMedium(value); + return this; + } + + @Override + public ByteBuf writeMediumLE(int value) { + super.writeMediumLE(value); + return this; + } + + @Override + public ByteBuf writeInt(int value) { + super.writeInt(value); + return this; + + } + + @Override + public ByteBuf writeIntLE(int value) { + super.writeIntLE(value); + return this; + } + + @Override + public ByteBuf writeLong(long value) { + super.writeLong(value); + return this; + } + + @Override + public ByteBuf writeLongLE(long value) { + super.writeLongLE(value); + return this; + } + + @Override + public ByteBuf writeChar(int value) { + super.writeChar(value); + return this; + } + + @Override + public ByteBuf writeFloat(float value) { + super.writeFloat(value); + return this; + } + + @Override + public ByteBuf writeDouble(double value) { + super.writeDouble(value); + return this; + } + + @Override + public ByteBuf writeBytes(ByteBuf src) { + super.writeBytes(src); + return this; + } + + @Override + public ByteBuf writeBytes(ByteBuf src, int length) { + super.writeBytes(src, length); + return this; + } + + @Override + public ByteBuf writeBytes(ByteBuf src, int srcIndex, int 
length) { + super.writeBytes(src, srcIndex, length); + return this; + } + + @Override + public ByteBuf writeBytes(byte[] src) { + super.writeBytes(src); + return this; + } + + @Override + public ByteBuf writeBytes(byte[] src, int srcIndex, int length) { + super.writeBytes(src, srcIndex, length); + return this; + } + + @Override + public ByteBuf writeBytes(ByteBuffer src) { + super.writeBytes(src); + return this; + } + + @Override + public ByteBuf writeZero(int length) { + super.writeZero(length); + return this; + } + + @Override + public ByteBuf copy() { + return newBuf(super.copy()); + } + + @Override + public ByteBuf copy(int index, int length) { + return newBuf(super.copy(index, length)); + } + + @Override + public ByteBuf slice() { + return newBuf(super.slice()); + } + + @Override + public ByteBuf retainedSlice() { + return newBuf(super.retainedSlice()); + } + + @Override + public ByteBuf slice(int index, int length) { + return newBuf(super.slice(index, length)); + } + + @Override + public ByteBuf retainedSlice(int index, int length) { + return newBuf(super.retainedSlice(index, length)); + } + + @Override + public ByteBuf duplicate() { + return newBuf(super.duplicate()); + } + + @Override + public ByteBuf retainedDuplicate() { + return newBuf(super.retainedDuplicate()); + } + + @Override + public ByteBuf retain(int increment) { + super.retain(increment); + return this; + } + + @Override + public ByteBuf touch(Object hint) { + super.touch(hint); + return this; + } + + @Override + public ByteBuf retain() { + super.retain(); + return this; + } + + @Override + public ByteBuf touch() { + super.touch(); + return this; + } + + @Override + public ByteBuf setFloatLE(int index, float value) { + return super.setFloatLE(index, value); + } + + @Override + public ByteBuf setDoubleLE(int index, double value) { + super.setDoubleLE(index, value); + return this; + } + + @Override + public ByteBuf writeFloatLE(float value) { + super.writeFloatLE(value); + return this; + } + + @Override + public ByteBuf writeDoubleLE(double value) { + super.writeDoubleLE(value); + return this; + } + + @Override + public ByteBuf asByteBuf() { + return this; + } +} diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/WrappedByteBuf.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/WrappedByteBuf.java new file mode 100644 index 0000000000000..50841cec000f1 --- /dev/null +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/WrappedByteBuf.java @@ -0,0 +1,1036 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.transport.netty4; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.util.ByteProcessor; +import io.netty.util.internal.ObjectUtil; +import io.netty.util.internal.StringUtil; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.channels.FileChannel; +import java.nio.channels.GatheringByteChannel; +import java.nio.channels.ScatteringByteChannel; +import java.nio.charset.Charset; + +/** + * A copy of Netty's WrappedByteBuf. + */ +class WrappedByteBuf extends ByteBuf { + + protected final ByteBuf buf; + + protected WrappedByteBuf(ByteBuf buf) { + this.buf = ObjectUtil.checkNotNull(buf, "buf"); + } + + @Override + public final boolean hasMemoryAddress() { + return buf.hasMemoryAddress(); + } + + @Override + public boolean isContiguous() { + return buf.isContiguous(); + } + + @Override + public final long memoryAddress() { + return buf.memoryAddress(); + } + + @Override + public final int capacity() { + return buf.capacity(); + } + + @Override + public ByteBuf capacity(int newCapacity) { + buf.capacity(newCapacity); + return this; + } + + @Override + public final int maxCapacity() { + return buf.maxCapacity(); + } + + @Override + public final ByteBufAllocator alloc() { + return buf.alloc(); + } + + @Override + public final ByteOrder order() { + return buf.order(); + } + + @Override + public ByteBuf order(ByteOrder endianness) { + return buf.order(endianness); + } + + @Override + public final ByteBuf unwrap() { + return buf; + } + + @Override + public ByteBuf asReadOnly() { + return buf.asReadOnly(); + } + + @Override + public boolean isReadOnly() { + return buf.isReadOnly(); + } + + @Override + public final boolean isDirect() { + return buf.isDirect(); + } + + @Override + public final int readerIndex() { + return buf.readerIndex(); + } + + @Override + public final ByteBuf readerIndex(int readerIndex) { + buf.readerIndex(readerIndex); + return this; + } + + @Override + public final int writerIndex() { + return buf.writerIndex(); + } + + @Override + public final ByteBuf writerIndex(int writerIndex) { + buf.writerIndex(writerIndex); + return this; + } + + @Override + public ByteBuf setIndex(int readerIndex, int writerIndex) { + buf.setIndex(readerIndex, writerIndex); + return this; + } + + @Override + public final int readableBytes() { + return buf.readableBytes(); + } + + @Override + public final int writableBytes() { + return buf.writableBytes(); + } + + @Override + public final int maxWritableBytes() { + return buf.maxWritableBytes(); + } + + @Override + public int maxFastWritableBytes() { + return buf.maxFastWritableBytes(); + } + + @Override + public final boolean isReadable() { + return buf.isReadable(); + } + + @Override + public final boolean isWritable() { + return buf.isWritable(); + } + + @Override + public final ByteBuf clear() { + buf.clear(); + return this; + } + + @Override + public final ByteBuf markReaderIndex() { + buf.markReaderIndex(); + return this; + } + + @Override + public final ByteBuf resetReaderIndex() { + buf.resetReaderIndex(); + return this; + } + + @Override + public final ByteBuf markWriterIndex() { + buf.markWriterIndex(); + return this; + } + + @Override + public final ByteBuf resetWriterIndex() { + buf.resetWriterIndex(); + return this; + } + + @Override + public ByteBuf discardReadBytes() { + buf.discardReadBytes(); + return this; + } + + @Override + public ByteBuf 
discardSomeReadBytes() { + buf.discardSomeReadBytes(); + return this; + } + + @Override + public ByteBuf ensureWritable(int minWritableBytes) { + buf.ensureWritable(minWritableBytes); + return this; + } + + @Override + public int ensureWritable(int minWritableBytes, boolean force) { + return buf.ensureWritable(minWritableBytes, force); + } + + @Override + public boolean getBoolean(int index) { + return buf.getBoolean(index); + } + + @Override + public byte getByte(int index) { + return buf.getByte(index); + } + + @Override + public short getUnsignedByte(int index) { + return buf.getUnsignedByte(index); + } + + @Override + public short getShort(int index) { + return buf.getShort(index); + } + + @Override + public short getShortLE(int index) { + return buf.getShortLE(index); + } + + @Override + public int getUnsignedShort(int index) { + return buf.getUnsignedShort(index); + } + + @Override + public int getUnsignedShortLE(int index) { + return buf.getUnsignedShortLE(index); + } + + @Override + public int getMedium(int index) { + return buf.getMedium(index); + } + + @Override + public int getMediumLE(int index) { + return buf.getMediumLE(index); + } + + @Override + public int getUnsignedMedium(int index) { + return buf.getUnsignedMedium(index); + } + + @Override + public int getUnsignedMediumLE(int index) { + return buf.getUnsignedMediumLE(index); + } + + @Override + public int getInt(int index) { + return buf.getInt(index); + } + + @Override + public int getIntLE(int index) { + return buf.getIntLE(index); + } + + @Override + public long getUnsignedInt(int index) { + return buf.getUnsignedInt(index); + } + + @Override + public long getUnsignedIntLE(int index) { + return buf.getUnsignedIntLE(index); + } + + @Override + public long getLong(int index) { + return buf.getLong(index); + } + + @Override + public long getLongLE(int index) { + return buf.getLongLE(index); + } + + @Override + public char getChar(int index) { + return buf.getChar(index); + } + + @Override + public float getFloat(int index) { + return buf.getFloat(index); + } + + @Override + public double getDouble(int index) { + return buf.getDouble(index); + } + + @Override + public ByteBuf getBytes(int index, ByteBuf dst) { + buf.getBytes(index, dst); + return this; + } + + @Override + public ByteBuf getBytes(int index, ByteBuf dst, int length) { + buf.getBytes(index, dst, length); + return this; + } + + @Override + public ByteBuf getBytes(int index, ByteBuf dst, int dstIndex, int length) { + buf.getBytes(index, dst, dstIndex, length); + return this; + } + + @Override + public ByteBuf getBytes(int index, byte[] dst) { + buf.getBytes(index, dst); + return this; + } + + @Override + public ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) { + buf.getBytes(index, dst, dstIndex, length); + return this; + } + + @Override + public ByteBuf getBytes(int index, ByteBuffer dst) { + buf.getBytes(index, dst); + return this; + } + + @Override + public ByteBuf getBytes(int index, OutputStream out, int length) throws IOException { + buf.getBytes(index, out, length); + return this; + } + + @Override + public int getBytes(int index, GatheringByteChannel out, int length) throws IOException { + return buf.getBytes(index, out, length); + } + + @Override + public int getBytes(int index, FileChannel out, long position, int length) throws IOException { + return buf.getBytes(index, out, position, length); + } + + @Override + public CharSequence getCharSequence(int index, int length, Charset charset) { + return buf.getCharSequence(index, length, 
charset); + } + + @Override + public ByteBuf setBoolean(int index, boolean value) { + buf.setBoolean(index, value); + return this; + } + + @Override + public ByteBuf setByte(int index, int value) { + buf.setByte(index, value); + return this; + } + + @Override + public ByteBuf setShort(int index, int value) { + buf.setShort(index, value); + return this; + } + + @Override + public ByteBuf setShortLE(int index, int value) { + buf.setShortLE(index, value); + return this; + } + + @Override + public ByteBuf setMedium(int index, int value) { + buf.setMedium(index, value); + return this; + } + + @Override + public ByteBuf setMediumLE(int index, int value) { + buf.setMediumLE(index, value); + return this; + } + + @Override + public ByteBuf setInt(int index, int value) { + buf.setInt(index, value); + return this; + } + + @Override + public ByteBuf setIntLE(int index, int value) { + buf.setIntLE(index, value); + return this; + } + + @Override + public ByteBuf setLong(int index, long value) { + buf.setLong(index, value); + return this; + } + + @Override + public ByteBuf setLongLE(int index, long value) { + buf.setLongLE(index, value); + return this; + } + + @Override + public ByteBuf setChar(int index, int value) { + buf.setChar(index, value); + return this; + } + + @Override + public ByteBuf setFloat(int index, float value) { + buf.setFloat(index, value); + return this; + } + + @Override + public ByteBuf setDouble(int index, double value) { + buf.setDouble(index, value); + return this; + } + + @Override + public ByteBuf setBytes(int index, ByteBuf src) { + buf.setBytes(index, src); + return this; + } + + @Override + public ByteBuf setBytes(int index, ByteBuf src, int length) { + buf.setBytes(index, src, length); + return this; + } + + @Override + public ByteBuf setBytes(int index, ByteBuf src, int srcIndex, int length) { + buf.setBytes(index, src, srcIndex, length); + return this; + } + + @Override + public ByteBuf setBytes(int index, byte[] src) { + buf.setBytes(index, src); + return this; + } + + @Override + public ByteBuf setBytes(int index, byte[] src, int srcIndex, int length) { + buf.setBytes(index, src, srcIndex, length); + return this; + } + + @Override + public ByteBuf setBytes(int index, ByteBuffer src) { + buf.setBytes(index, src); + return this; + } + + @Override + public int setBytes(int index, InputStream in, int length) throws IOException { + return buf.setBytes(index, in, length); + } + + @Override + public int setBytes(int index, ScatteringByteChannel in, int length) throws IOException { + return buf.setBytes(index, in, length); + } + + @Override + public int setBytes(int index, FileChannel in, long position, int length) throws IOException { + return buf.setBytes(index, in, position, length); + } + + @Override + public ByteBuf setZero(int index, int length) { + buf.setZero(index, length); + return this; + } + + @Override + public int setCharSequence(int index, CharSequence sequence, Charset charset) { + return buf.setCharSequence(index, sequence, charset); + } + + @Override + public boolean readBoolean() { + return buf.readBoolean(); + } + + @Override + public byte readByte() { + return buf.readByte(); + } + + @Override + public short readUnsignedByte() { + return buf.readUnsignedByte(); + } + + @Override + public short readShort() { + return buf.readShort(); + } + + @Override + public short readShortLE() { + return buf.readShortLE(); + } + + @Override + public int readUnsignedShort() { + return buf.readUnsignedShort(); + } + + @Override + public int readUnsignedShortLE() { + return 
buf.readUnsignedShortLE(); + } + + @Override + public int readMedium() { + return buf.readMedium(); + } + + @Override + public int readMediumLE() { + return buf.readMediumLE(); + } + + @Override + public int readUnsignedMedium() { + return buf.readUnsignedMedium(); + } + + @Override + public int readUnsignedMediumLE() { + return buf.readUnsignedMediumLE(); + } + + @Override + public int readInt() { + return buf.readInt(); + } + + @Override + public int readIntLE() { + return buf.readIntLE(); + } + + @Override + public long readUnsignedInt() { + return buf.readUnsignedInt(); + } + + @Override + public long readUnsignedIntLE() { + return buf.readUnsignedIntLE(); + } + + @Override + public long readLong() { + return buf.readLong(); + } + + @Override + public long readLongLE() { + return buf.readLongLE(); + } + + @Override + public char readChar() { + return buf.readChar(); + } + + @Override + public float readFloat() { + return buf.readFloat(); + } + + @Override + public double readDouble() { + return buf.readDouble(); + } + + @Override + public ByteBuf readBytes(int length) { + return buf.readBytes(length); + } + + @Override + public ByteBuf readSlice(int length) { + return buf.readSlice(length); + } + + @Override + public ByteBuf readRetainedSlice(int length) { + return buf.readRetainedSlice(length); + } + + @Override + public ByteBuf readBytes(ByteBuf dst) { + buf.readBytes(dst); + return this; + } + + @Override + public ByteBuf readBytes(ByteBuf dst, int length) { + buf.readBytes(dst, length); + return this; + } + + @Override + public ByteBuf readBytes(ByteBuf dst, int dstIndex, int length) { + buf.readBytes(dst, dstIndex, length); + return this; + } + + @Override + public ByteBuf readBytes(byte[] dst) { + buf.readBytes(dst); + return this; + } + + @Override + public ByteBuf readBytes(byte[] dst, int dstIndex, int length) { + buf.readBytes(dst, dstIndex, length); + return this; + } + + @Override + public ByteBuf readBytes(ByteBuffer dst) { + buf.readBytes(dst); + return this; + } + + @Override + public ByteBuf readBytes(OutputStream out, int length) throws IOException { + buf.readBytes(out, length); + return this; + } + + @Override + public int readBytes(GatheringByteChannel out, int length) throws IOException { + return buf.readBytes(out, length); + } + + @Override + public int readBytes(FileChannel out, long position, int length) throws IOException { + return buf.readBytes(out, position, length); + } + + @Override + public CharSequence readCharSequence(int length, Charset charset) { + return buf.readCharSequence(length, charset); + } + + @Override + public ByteBuf skipBytes(int length) { + buf.skipBytes(length); + return this; + } + + @Override + public ByteBuf writeBoolean(boolean value) { + buf.writeBoolean(value); + return this; + } + + @Override + public ByteBuf writeByte(int value) { + buf.writeByte(value); + return this; + } + + @Override + public ByteBuf writeShort(int value) { + buf.writeShort(value); + return this; + } + + @Override + public ByteBuf writeShortLE(int value) { + buf.writeShortLE(value); + return this; + } + + @Override + public ByteBuf writeMedium(int value) { + buf.writeMedium(value); + return this; + } + + @Override + public ByteBuf writeMediumLE(int value) { + buf.writeMediumLE(value); + return this; + } + + @Override + public ByteBuf writeInt(int value) { + buf.writeInt(value); + return this; + } + + @Override + public ByteBuf writeIntLE(int value) { + buf.writeIntLE(value); + return this; + } + + @Override + public ByteBuf writeLong(long value) { + 
buf.writeLong(value); + return this; + } + + @Override + public ByteBuf writeLongLE(long value) { + buf.writeLongLE(value); + return this; + } + + @Override + public ByteBuf writeChar(int value) { + buf.writeChar(value); + return this; + } + + @Override + public ByteBuf writeFloat(float value) { + buf.writeFloat(value); + return this; + } + + @Override + public ByteBuf writeDouble(double value) { + buf.writeDouble(value); + return this; + } + + @Override + public ByteBuf writeBytes(ByteBuf src) { + buf.writeBytes(src); + return this; + } + + @Override + public ByteBuf writeBytes(ByteBuf src, int length) { + buf.writeBytes(src, length); + return this; + } + + @Override + public ByteBuf writeBytes(ByteBuf src, int srcIndex, int length) { + buf.writeBytes(src, srcIndex, length); + return this; + } + + @Override + public ByteBuf writeBytes(byte[] src) { + buf.writeBytes(src); + return this; + } + + @Override + public ByteBuf writeBytes(byte[] src, int srcIndex, int length) { + buf.writeBytes(src, srcIndex, length); + return this; + } + + @Override + public ByteBuf writeBytes(ByteBuffer src) { + buf.writeBytes(src); + return this; + } + + @Override + public int writeBytes(InputStream in, int length) throws IOException { + return buf.writeBytes(in, length); + } + + @Override + public int writeBytes(ScatteringByteChannel in, int length) throws IOException { + return buf.writeBytes(in, length); + } + + @Override + public int writeBytes(FileChannel in, long position, int length) throws IOException { + return buf.writeBytes(in, position, length); + } + + @Override + public ByteBuf writeZero(int length) { + buf.writeZero(length); + return this; + } + + @Override + public int writeCharSequence(CharSequence sequence, Charset charset) { + return buf.writeCharSequence(sequence, charset); + } + + @Override + public int indexOf(int fromIndex, int toIndex, byte value) { + return buf.indexOf(fromIndex, toIndex, value); + } + + @Override + public int bytesBefore(byte value) { + return buf.bytesBefore(value); + } + + @Override + public int bytesBefore(int length, byte value) { + return buf.bytesBefore(length, value); + } + + @Override + public int bytesBefore(int index, int length, byte value) { + return buf.bytesBefore(index, length, value); + } + + @Override + public int forEachByte(ByteProcessor processor) { + return buf.forEachByte(processor); + } + + @Override + public int forEachByte(int index, int length, ByteProcessor processor) { + return buf.forEachByte(index, length, processor); + } + + @Override + public int forEachByteDesc(ByteProcessor processor) { + return buf.forEachByteDesc(processor); + } + + @Override + public int forEachByteDesc(int index, int length, ByteProcessor processor) { + return buf.forEachByteDesc(index, length, processor); + } + + @Override + public ByteBuf copy() { + return buf.copy(); + } + + @Override + public ByteBuf copy(int index, int length) { + return buf.copy(index, length); + } + + @Override + public ByteBuf slice() { + return buf.slice(); + } + + @Override + public ByteBuf retainedSlice() { + return buf.retainedSlice(); + } + + @Override + public ByteBuf slice(int index, int length) { + return buf.slice(index, length); + } + + @Override + public ByteBuf retainedSlice(int index, int length) { + return buf.retainedSlice(index, length); + } + + @Override + public ByteBuf duplicate() { + return buf.duplicate(); + } + + @Override + public ByteBuf retainedDuplicate() { + return buf.retainedDuplicate(); + } + + @Override + public int nioBufferCount() { + return 
buf.nioBufferCount(); + } + + @Override + public ByteBuffer nioBuffer() { + return buf.nioBuffer(); + } + + @Override + public ByteBuffer nioBuffer(int index, int length) { + return buf.nioBuffer(index, length); + } + + @Override + public ByteBuffer[] nioBuffers() { + return buf.nioBuffers(); + } + + @Override + public ByteBuffer[] nioBuffers(int index, int length) { + return buf.nioBuffers(index, length); + } + + @Override + public ByteBuffer internalNioBuffer(int index, int length) { + return buf.internalNioBuffer(index, length); + } + + @Override + public boolean hasArray() { + return buf.hasArray(); + } + + @Override + public byte[] array() { + return buf.array(); + } + + @Override + public int arrayOffset() { + return buf.arrayOffset(); + } + + @Override + public String toString(Charset charset) { + return buf.toString(charset); + } + + @Override + public String toString(int index, int length, Charset charset) { + return buf.toString(index, length, charset); + } + + @Override + public int hashCode() { + return buf.hashCode(); + } + + @Override + @SuppressWarnings("EqualsWhichDoesntCheckParameterClass") + public boolean equals(Object obj) { + return buf.equals(obj); + } + + @Override + public int compareTo(ByteBuf buffer) { + return buf.compareTo(buffer); + } + + @Override + public String toString() { + return StringUtil.simpleClassName(this) + '(' + buf.toString() + ')'; + } + + @Override + public ByteBuf retain(int increment) { + buf.retain(increment); + return this; + } + + @Override + public ByteBuf retain() { + buf.retain(); + return this; + } + + @Override + public ByteBuf touch() { + buf.touch(); + return this; + } + + @Override + public ByteBuf touch(Object hint) { + buf.touch(hint); + return this; + } + + @Override + public final boolean isReadable(int size) { + return buf.isReadable(size); + } + + @Override + public final boolean isWritable(int size) { + return buf.isWritable(size); + } + + @Override + public final int refCnt() { + return buf.refCnt(); + } + + @Override + public boolean release() { + return buf.release(); + } + + @Override + public boolean release(int decrement) { + return buf.release(decrement); + } + +} diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStreamTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStreamTests.java index 5ff5a27e2d551..d456bbecfbd20 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStreamTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStreamTests.java @@ -67,7 +67,7 @@ public void testEnqueueChunksBeforeRequest() { for (int i = 0; i < totalChunks; i++) { channel.writeInbound(randomContent(1024)); } - assertEquals(totalChunks * 1024, stream.buf().readableBytes()); + assertEquals(totalChunks * 1024, stream.bufSize()); } // ensures all received chunks can be flushed downstream @@ -119,7 +119,7 @@ public void testReadFromChannel() { channel.writeInbound(randomLastContent(chunkSize)); for (int i = 0; i < totalChunks; i++) { - assertNull("should not enqueue chunks", stream.buf()); + assertEquals("should not enqueue chunks", 0, stream.bufSize()); stream.next(); channel.runPendingTasks(); assertEquals("each next() should produce single chunk", i + 1, gotChunks.size()); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NettyAllocatorTests.java 
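
A note on the wall of delegation above: every read, write, retain, and release override simply forwards to the wrapped buffer, so that a subclass only has to intercept the calls it cares about. Below is a minimal standalone sketch of the idea this enables, zeroing a heap buffer's backing array when the last reference is released; the class and method names are illustrative, not the PR's, and a real implementation must also cover composite and non-array buffers, as the tests that follow do.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

import java.util.Arrays;

final class ZeroOnReleaseSketch {
    // Overwrite the heap storage just before the final release, so a
    // use-after-release bug reads zeros instead of stale (possibly sensitive) bytes.
    static boolean zeroAndRelease(ByteBuf buf) {
        if (buf.refCnt() == 1 && buf.hasArray()) {
            int from = buf.arrayOffset();
            Arrays.fill(buf.array(), from, from + buf.capacity(), (byte) 0);
        }
        return buf.release();
    }

    public static void main(String[] args) {
        byte[] backing = { 1, 2, 3, 4 };
        ByteBuf buf = Unpooled.wrappedBuffer(backing);
        zeroAndRelease(buf);
        System.out.println(Arrays.toString(backing)); // prints [0, 0, 0, 0]
    }
}
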
b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NettyAllocatorTests.java new file mode 100644 index 0000000000000..b9e9b667e72fe --- /dev/null +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NettyAllocatorTests.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.transport.netty4; + +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.Unpooled; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; + +import static org.elasticsearch.transport.netty4.NettyAllocator.TrashingByteBufAllocator; + +public class NettyAllocatorTests extends ESTestCase { + + static void assertBufferTrashed(BytesReference bytesRef) throws IOException { + var iter = bytesRef.iterator(); + BytesRef br; + while ((br = iter.next()) != null) { + for (var i = br.offset; i < br.offset + br.length; i++) { + assertEquals("off=" + br.offset + " len=" + br.length + " i=" + i, 0, br.bytes[i]); + } + } + } + + public void testTrashArrayByteBuf() { + var arr = randomByteArrayOfLength(between(1024, 2048)); + var buf = Unpooled.wrappedBuffer(arr); + var tBuf = new TrashingByteBuf(buf); + tBuf.release(); + var emptyArr = new byte[arr.length]; + assertArrayEquals(emptyArr, arr); + } + + public void testNioBufsTrashingByteBuf() { + var arrCnt = between(1, 16); + var byteArrs = new byte[arrCnt][]; + var byteBufs = new ByteBuffer[arrCnt]; + for (var i = 0; i < arrCnt; i++) { + byteArrs[i] = randomByteArrayOfLength(between(1024, 2048)); + byteBufs[i] = ByteBuffer.wrap(byteArrs[i]); + } + var buf = Unpooled.wrappedBuffer(byteBufs); + var tBuf = new TrashingByteBuf(buf); + tBuf.release(); + for (int i = 0; i < arrCnt; i++) { + for (int j = 0; j < byteArrs[i].length; j++) { + assertEquals(0, byteArrs[i][j]); + } + } + } + + public void testNioBufOffsetTrashingByteBuf() { + var arr = randomByteArrayOfLength(1024); + var off = 1; + var len = arr.length - 2; + arr[0] = 1; + arr[arr.length - 1] = 1; + var buf = Unpooled.wrappedBuffer(arr, off, len); + var tBuf = new TrashingByteBuf(buf); + tBuf.release(); + assertEquals(1, arr[0]); + assertEquals(1, arr[arr.length - 1]); + for (int i = 1; i < arr.length - 1; i++) { + assertEquals("at index " + i, 0, arr[i]); + } + } + + public void testTrashingByteBufAllocator() throws IOException { + var alloc = new TrashingByteBufAllocator(ByteBufAllocator.DEFAULT); + var size = between(1024 * 1024, 10 * 1024 * 1024); + + // use 3 different heap allocation methods + for (var buf : List.of(alloc.heapBuffer(), alloc.heapBuffer(1024), alloc.heapBuffer(1024, size))) { + buf.writeBytes(randomByteArrayOfLength(size)); + var bytesRef = Netty4Utils.toBytesReference(buf); + buf.release(); + assertBufferTrashed(bytesRef); + } + } + + public void testTrashingCompositeByteBuf() throws IOException { + var alloc = new TrashingByteBufAllocator(ByteBufAllocator.DEFAULT); + var compBuf = alloc.compositeHeapBuffer(); + for 
(var i = 0; i < between(1, 10); i++) { + var buf = alloc.heapBuffer().writeBytes(randomByteArrayOfLength(between(1024, 8192))); + compBuf.addComponent(true, buf); + } + var bytesRef = Netty4Utils.toBytesReference(compBuf); + compBuf.release(); + assertBufferTrashed(bytesRef); + } + +} diff --git a/muted-tests.yml b/muted-tests.yml index 22c304dc13b62..d01b956db9199 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -1,7 +1,4 @@ tests: -- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT - method: test {yaml=reference/esql/esql-async-query-api/line_17} - issue: https://github.com/elastic/elasticsearch/issues/109260 - class: "org.elasticsearch.client.RestClientSingleHostIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/102717" method: "testRequestResetAndAbort" @@ -11,18 +8,9 @@ tests: - class: org.elasticsearch.xpack.restart.FullClusterRestartIT method: testDataStreams {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/111448 -- class: org.elasticsearch.upgrades.FullClusterRestartIT - method: testSnapshotRestore {cluster=UPGRADED} - issue: https://github.com/elastic/elasticsearch/issues/111798 - class: org.elasticsearch.smoketest.WatcherYamlRestIT method: test {p0=watcher/usage/10_basic/Test watcher usage stats output} issue: https://github.com/elastic/elasticsearch/issues/112189 -- class: org.elasticsearch.xpack.esql.action.ManyShardsIT - method: testRejection - issue: https://github.com/elastic/elasticsearch/issues/112406 -- class: org.elasticsearch.xpack.esql.action.ManyShardsIT - method: testConcurrentQueries - issue: https://github.com/elastic/elasticsearch/issues/112424 - class: org.elasticsearch.ingest.geoip.IngestGeoIpClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/111497 - class: org.elasticsearch.packaging.test.PackagesSecurityAutoConfigurationTests @@ -70,9 +58,6 @@ tests: - class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT method: test {p0=mtermvectors/10_basic/Tests catching other exceptions per item} issue: https://github.com/elastic/elasticsearch/issues/113325 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=search/500_date_range/from, to, include_lower, include_upper deprecated} - issue: https://github.com/elastic/elasticsearch/pull/113286 - class: org.elasticsearch.integration.KibanaUserRoleIntegTests method: testFieldMappings issue: https://github.com/elastic/elasticsearch/issues/113592 @@ -82,9 +67,6 @@ tests: - class: org.elasticsearch.xpack.transform.integration.TransformIT method: testStopWaitForCheckpoint issue: https://github.com/elastic/elasticsearch/issues/106113 -- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT - method: test {categorize.Categorize SYNC} - issue: https://github.com/elastic/elasticsearch/issues/113722 - class: org.elasticsearch.kibana.KibanaThreadPoolIT method: testBlockedThreadPoolsRejectUserRequests issue: https://github.com/elastic/elasticsearch/issues/113939 @@ -97,9 +79,6 @@ tests: - class: org.elasticsearch.xpack.inference.TextEmbeddingCrudIT method: testPutE5Small_withPlatformSpecificVariant issue: https://github.com/elastic/elasticsearch/issues/113950 -- class: org.elasticsearch.xpack.inference.InferenceCrudIT - method: testGet - issue: https://github.com/elastic/elasticsearch/issues/114135 - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT method: test {yaml=reference/rest-api/usage/line_38} issue: https://github.com/elastic/elasticsearch/issues/113694 @@ -121,9 
+100,6 @@ tests: - class: org.elasticsearch.xpack.shutdown.NodeShutdownIT method: testStalledShardMigrationProperlyDetected issue: https://github.com/elastic/elasticsearch/issues/115697 -- class: org.elasticsearch.xpack.inference.InferenceCrudIT - method: testSupportedStream - issue: https://github.com/elastic/elasticsearch/issues/113430 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=transform/transforms_start_stop/Verify start transform reuses destination index} issue: https://github.com/elastic/elasticsearch/issues/115808 @@ -139,35 +115,14 @@ tests: - class: org.elasticsearch.search.SearchServiceTests method: testParseSourceValidation issue: https://github.com/elastic/elasticsearch/issues/115936 -- class: org.elasticsearch.index.reindex.ReindexNodeShutdownIT - method: testReindexWithShutdown - issue: https://github.com/elastic/elasticsearch/issues/115996 -- class: org.elasticsearch.search.query.SearchQueryIT - method: testAllDocsQueryString - issue: https://github.com/elastic/elasticsearch/issues/115728 - class: org.elasticsearch.xpack.application.connector.ConnectorIndexServiceTests issue: https://github.com/elastic/elasticsearch/issues/116087 - class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT method: test {p0=cat.shards/10_basic/Help} issue: https://github.com/elastic/elasticsearch/issues/116110 -- class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT - method: testSnapshotRestore {cluster=UPGRADED} - issue: https://github.com/elastic/elasticsearch/issues/111799 -- class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT - method: testSnapshotRestore {cluster=OLD} - issue: https://github.com/elastic/elasticsearch/issues/111774 -- class: org.elasticsearch.upgrades.FullClusterRestartIT - method: testSnapshotRestore {cluster=OLD} - issue: https://github.com/elastic/elasticsearch/issues/111777 - class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT method: testLookbackWithIndicesOptions issue: https://github.com/elastic/elasticsearch/issues/116127 -- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT - method: test {categorize.Categorize SYNC} - issue: https://github.com/elastic/elasticsearch/issues/113054 -- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT - method: test {categorize.Categorize ASYNC} - issue: https://github.com/elastic/elasticsearch/issues/113055 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=transform/transforms_start_stop/Test start already started transform} issue: https://github.com/elastic/elasticsearch/issues/98802 @@ -180,72 +135,110 @@ tests: - class: org.elasticsearch.search.basic.SearchWithRandomDisconnectsIT method: testSearchWithRandomDisconnects issue: https://github.com/elastic/elasticsearch/issues/116175 -- class: org.elasticsearch.search.basic.SearchWhileRelocatingIT - method: testSearchAndRelocateConcurrentlyRandomReplicas - issue: https://github.com/elastic/elasticsearch/issues/116145 - class: org.elasticsearch.xpack.deprecation.DeprecationHttpIT method: testDeprecatedSettingsReturnWarnings issue: https://github.com/elastic/elasticsearch/issues/108628 -- class: org.elasticsearch.xpack.security.operator.OperatorPrivilegesIT - method: testEveryActionIsEitherOperatorOnlyOrNonOperator - issue: https://github.com/elastic/elasticsearch/issues/102992 - class: org.elasticsearch.action.search.SearchQueryThenFetchAsyncActionTests method: testBottomFieldSort issue: https://github.com/elastic/elasticsearch/issues/116249 - class: 
org.elasticsearch.xpack.shutdown.NodeShutdownIT method: testAllocationPreventedForRemoval issue: https://github.com/elastic/elasticsearch/issues/116363 -- class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT - method: test {categorize.Categorize ASYNC} - issue: https://github.com/elastic/elasticsearch/issues/116373 - class: org.elasticsearch.threadpool.SimpleThreadPoolIT method: testThreadPoolMetrics issue: https://github.com/elastic/elasticsearch/issues/108320 - class: org.elasticsearch.xpack.downsample.ILMDownsampleDisruptionIT method: testILMDownsampleRollingRestart issue: https://github.com/elastic/elasticsearch/issues/114233 -- class: org.elasticsearch.xpack.kql.query.KqlQueryBuilderTests - issue: https://github.com/elastic/elasticsearch/issues/116487 - class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests method: testInvalidJSON issue: https://github.com/elastic/elasticsearch/issues/116521 - class: org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshotsCanMatchOnCoordinatorIntegTests method: testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQueryingAnyNodeWhenTheyAreOutsideOfTheQueryRange issue: https://github.com/elastic/elasticsearch/issues/116523 -- class: org.elasticsearch.xpack.logsdb.qa.StandardVersusLogsIndexModeRandomDataDynamicMappingChallengeRestIT - method: testMatchAllQuery - issue: https://github.com/elastic/elasticsearch/issues/116536 -- class: org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT - method: test {categorize.Categorize} - issue: https://github.com/elastic/elasticsearch/issues/116434 -- class: org.elasticsearch.upgrades.SearchStatesIT - method: testBWCSearchStates - issue: https://github.com/elastic/elasticsearch/issues/116617 -- class: org.elasticsearch.upgrades.SearchStatesIT - method: testCanMatch - issue: https://github.com/elastic/elasticsearch/issues/116618 -- class: org.elasticsearch.packaging.test.ArchiveGenerateInitialCredentialsTests - method: test20NoAutoGenerationWhenAutoConfigurationDisabled - issue: https://github.com/elastic/elasticsearch/issues/116619 -- class: org.elasticsearch.packaging.test.BootstrapCheckTests - method: test20RunWithBootstrapChecks - issue: https://github.com/elastic/elasticsearch/issues/116620 -- class: org.elasticsearch.packaging.test.DockerTests - method: test011SecurityEnabledStatus - issue: https://github.com/elastic/elasticsearch/issues/116628 - class: org.elasticsearch.reservedstate.service.RepositoriesFileSettingsIT method: testSettingsApplied issue: https://github.com/elastic/elasticsearch/issues/116694 -- class: org.elasticsearch.snapshots.SnapshotShutdownIT - method: testRestartNodeDuringSnapshot - issue: https://github.com/elastic/elasticsearch/issues/116730 -- class: org.elasticsearch.xpack.inference.InferenceRestIT - issue: https://github.com/elastic/elasticsearch/issues/116740 -- class: org.elasticsearch.action.search.SearchRequestTests - method: testSerializationConstants - issue: https://github.com/elastic/elasticsearch/issues/116752 - class: org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryGroupsResolverTests issue: https://github.com/elastic/elasticsearch/issues/116182 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=snapshot/20_operator_privileges_disabled/Operator only settings can be set and restored by non-operator user when operator privileges is disabled} + issue: https://github.com/elastic/elasticsearch/issues/116775 +- class: org.elasticsearch.xpack.searchablesnapshots.hdfs.SecureHdfsSearchableSnapshotsIT + 
issue: https://github.com/elastic/elasticsearch/issues/116851 +- class: org.elasticsearch.search.basic.SearchWithRandomIOExceptionsIT + method: testRandomDirectoryIOExceptions + issue: https://github.com/elastic/elasticsearch/issues/114824 +- class: org.elasticsearch.xpack.restart.QueryBuilderBWCIT + method: testQueryBuilderBWC {p0=UPGRADED} + issue: https://github.com/elastic/elasticsearch/issues/116989 +- class: org.elasticsearch.upgrades.QueryBuilderBWCIT + method: testQueryBuilderBWC {cluster=UPGRADED} + issue: https://github.com/elastic/elasticsearch/issues/116990 +- class: org.elasticsearch.xpack.apmdata.APMYamlTestSuiteIT + method: test {yaml=/10_apm/Test template reinstallation} + issue: https://github.com/elastic/elasticsearch/issues/116445 +- class: org.elasticsearch.xpack.inference.DefaultEndPointsIT + method: testMultipleInferencesTriggeringDownloadAndDeploy + issue: https://github.com/elastic/elasticsearch/issues/117208 +- class: org.elasticsearch.ingest.geoip.EnterpriseGeoIpDownloaderIT + method: testEnterpriseDownloaderTask + issue: https://github.com/elastic/elasticsearch/issues/115163 +- class: org.elasticsearch.versioning.ConcurrentSeqNoVersioningIT + method: testSeqNoCASLinearizability + issue: https://github.com/elastic/elasticsearch/issues/117249 +- class: org.elasticsearch.discovery.ClusterDisruptionIT + method: testAckedIndexing + issue: https://github.com/elastic/elasticsearch/issues/117024 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=snapshot/10_basic/Create a source only snapshot and then restore it} + issue: https://github.com/elastic/elasticsearch/issues/117295 +- class: org.elasticsearch.xpack.searchablesnapshots.RetrySearchIntegTests + method: testRetryPointInTime + issue: https://github.com/elastic/elasticsearch/issues/117116 +- class: org.elasticsearch.xpack.inference.DefaultEndPointsIT + method: testInferDeploysDefaultElser + issue: https://github.com/elastic/elasticsearch/issues/114913 +- class: org.elasticsearch.xpack.inference.InferenceRestIT + method: test {p0=inference/40_semantic_text_query/Query a field that uses the default ELSER 2 endpoint} + issue: https://github.com/elastic/elasticsearch/issues/117027 +- class: org.elasticsearch.xpack.inference.InferenceRestIT + method: test {p0=inference/30_semantic_text_inference/Calculates embeddings using the default ELSER 2 endpoint} + issue: https://github.com/elastic/elasticsearch/issues/117349 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=transform/transforms_reset/Test reset running transform} + issue: https://github.com/elastic/elasticsearch/issues/117473 +- class: org.elasticsearch.repositories.s3.RepositoryS3EcsClientYamlTestSuiteIT + issue: https://github.com/elastic/elasticsearch/issues/117525 +- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT + method: test {p0=synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set} + issue: https://github.com/elastic/elasticsearch/issues/116777 +- class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests + method: testStopWorksInMiddleOfProcessing + issue: https://github.com/elastic/elasticsearch/issues/117591 +- class: "org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT" + method: "test {scoring.*}" + issue: https://github.com/elastic/elasticsearch/issues/117641 +- class: "org.elasticsearch.xpack.esql.qa.single_node.EsqlSpecIT" + method: "test {scoring.*}" + issue: https://github.com/elastic/elasticsearch/issues/117641 +- class: 
"org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT" + method: "test {scoring.*}" + issue: https://github.com/elastic/elasticsearch/issues/117641 +- class: org.elasticsearch.xpack.inference.InferenceCrudIT + method: testSupportedStream + issue: https://github.com/elastic/elasticsearch/issues/117745 +- class: org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT + method: test {scoring.QstrWithFieldAndScoringSortedEval} + issue: https://github.com/elastic/elasticsearch/issues/117751 +- class: org.elasticsearch.search.ccs.CrossClusterIT + method: testCancel + issue: https://github.com/elastic/elasticsearch/issues/108061 +- class: org.elasticsearch.test.rest.yaml.CcsCommonYamlTestSuiteIT + method: test {p0=search.highlight/50_synthetic_source/text multi unified from vectors} + issue: https://github.com/elastic/elasticsearch/issues/117815 +- class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT + issue: https://github.com/elastic/elasticsearch/issues/111319 # Examples: # diff --git a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle index 40b12c46c0bfe..05cd2cb44124c 100644 --- a/plugins/analysis-icu/build.gradle +++ b/plugins/analysis-icu/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.internal.info.BuildParams - /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the "Elastic License @@ -28,7 +26,7 @@ dependencies { api "com.ibm.icu:icu4j:${versions.icu4j}" } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' } diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 6eb5b574b88f9..9549236775bfe 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -1,6 +1,3 @@ -import org.elasticsearch.gradle.LoggedExec -import org.elasticsearch.gradle.internal.info.BuildParams - /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the "Elastic License @@ -9,6 +6,9 @@ import org.elasticsearch.gradle.internal.info.BuildParams * your election, the "Elastic License 2.0", the "GNU Affero General Public * License v3.0 only", or the "Server Side Public License, v 1". 
*/ + +import org.elasticsearch.gradle.LoggedExec + apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' @@ -63,11 +63,12 @@ TaskProvider createKey = tasks.register("createKey", LoggedExec) { keystore.parentFile.mkdirs() } outputs.file(keystore).withPropertyName('keystoreFile') - executable = "${BuildParams.runtimeJavaHome}/bin/keytool" + executable = "${buildParams.runtimeJavaHome.get()}/bin/keytool" getStandardInput().set('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n') + String keystorePath = projectDir.toPath().relativize(keystore.toPath()).toString() args '-genkey', '-alias', 'test-node', - '-keystore', keystore, + '-keystore', keystorePath, '-keyalg', 'RSA', '-keysize', '2048', '-validity', '712', diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index d9e86315d9468..980e2467206d7 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.internal.info.BuildParams - /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the "Elastic License @@ -57,7 +55,7 @@ tasks.register("writeTestJavaPolicy") { throw new GradleException("failed to create temporary directory [${tmp}]") } final File javaPolicy = file("${tmp}/java.policy") - if (BuildParams.inFipsJvm) { + if (buildParams.inFipsJvm) { javaPolicy.write( [ "grant {", @@ -77,6 +75,7 @@ tasks.register("writeTestJavaPolicy") { "permission java.security.SecurityPermission \"getProperty.jdk.tls.disabledAlgorithms\";", "permission java.security.SecurityPermission \"getProperty.jdk.certpath.disabledAlgorithms\";", "permission java.security.SecurityPermission \"getProperty.keystore.type.compat\";", + "permission java.security.SecurityPermission \"getProperty.org.bouncycastle.ec.max_f2m_field_size\";", "};" ].join("\n") ) @@ -98,7 +97,7 @@ tasks.named("test").configure { // this is needed to manipulate com.amazonaws.sdk.ec2MetadataServiceEndpointOverride system property // it is better rather disable security manager at all with `systemProperty 'tests.security.manager', 'false'` - if (BuildParams.inFipsJvm){ + if (buildParams.inFipsJvm){ nonInputProperties.systemProperty 'java.security.policy', "=file://${buildDir}/tmp/java.policy" } else { nonInputProperties.systemProperty 'java.security.policy', "file://${buildDir}/tmp/java.policy" diff --git a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle index 5cdcdc59cafe9..5f0fee6636256 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle +++ b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle @@ -8,7 +8,6 @@ */ import org.apache.tools.ant.filters.ReplaceTokens -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.AntFixture import org.elasticsearch.gradle.internal.test.RestIntegTestTask import org.elasticsearch.gradle.internal.test.rest.LegacyYamlRestTestPlugin @@ -55,8 +54,9 @@ tasks.named("yamlRestTest").configure { enabled = false } ['KeyStore', 'EnvVariables', 'SystemProperties', 'ContainerCredentials', 'InstanceProfile'].forEach { action -> TaskProvider fixture = tasks.register("ec2Fixture${action}", AntFixture) { dependsOn project.sourceSets.yamlRestTest.runtimeClasspath - env 'CLASSPATH', "${-> project.sourceSets.yamlRestTest.runtimeClasspath.asPath}" - executable = "${BuildParams.runtimeJavaHome}/bin/java" + FileCollection 
cp = project.sourceSets.yamlRestTest.runtimeClasspath + env 'CLASSPATH', "${-> cp.asPath}" + executable = "${buildParams.runtimeJavaHome.get() }/bin/java" args 'org.elasticsearch.discovery.ec2.AmazonEC2Fixture', baseDir, "${buildDir}/testclusters/yamlRestTest${action}-1/config/unicast_hosts.txt" } @@ -68,9 +68,18 @@ tasks.named("yamlRestTest").configure { enabled = false } classpath = yamlRestTestSourceSet.getRuntimeClasspath() } + if(action == 'ContainerCredentials') { + def addressAndPortSource = fixture.get().addressAndPortSource + testClusters.matching { it.name == "yamlRestTestContainerCredentials" }.configureEach { + environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', + () -> addressAndPortSource.map{ addr -> "http://${addr}/ecs_credentials_endpoint" }.get(), IGNORE_VALUE + } + } + tasks.named("check").configure { dependsOn(yamlRestTestTask) } + def addressAndPortSource = fixture.get().addressAndPortSource testClusters.matching { it.name == yamlRestTestTask.name}.configureEach { numberOfNodes = ec2NumberOfNodes @@ -78,9 +87,9 @@ tasks.named("yamlRestTest").configure { enabled = false } setting 'discovery.seed_providers', 'ec2' setting 'network.host', '_ec2_' - setting 'discovery.ec2.endpoint', { "http://${-> fixture.get().addressAndPort}" }, IGNORE_VALUE + setting 'discovery.ec2.endpoint', { "http://${-> addressAndPortSource.get()}" }, IGNORE_VALUE - systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", { "http://${-> fixture.get().addressAndPort}" }, IGNORE_VALUE + systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", { "http://${-> addressAndPortSource.get()}" }, IGNORE_VALUE } } @@ -107,11 +116,6 @@ tasks.named("ec2FixtureContainerCredentials").configure { env 'ACTIVATE_CONTAINER_CREDENTIALS', true } -testClusters.matching { it.name == "yamlRestTestContainerCredentials" }.configureEach { - environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', - { "http://${-> tasks.findByName("ec2FixtureContainerCredentials").addressAndPort}/ecs_credentials_endpoint" }, IGNORE_VALUE -} - // Extra config for InstanceProfile tasks.named("ec2FixtureInstanceProfile").configure { env 'ACTIVATE_INSTANCE_PROFILE', true diff --git a/plugins/discovery-gce/qa/gce/build.gradle b/plugins/discovery-gce/qa/gce/build.gradle index 14a904e107188..72cb429b49072 100644 --- a/plugins/discovery-gce/qa/gce/build.gradle +++ b/plugins/discovery-gce/qa/gce/build.gradle @@ -7,9 +7,7 @@ * License v3.0 only", or the "Server Side Public License, v 1". 
*/ - import org.apache.tools.ant.filters.ReplaceTokens -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.AntFixture import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE @@ -32,7 +30,7 @@ restResources { def gceFixtureProvider = tasks.register("gceFixture", AntFixture) { dependsOn project.sourceSets.yamlRestTest.runtimeClasspath env 'CLASSPATH', "${-> project.sourceSets.yamlRestTest.runtimeClasspath.asPath}" - executable = "${BuildParams.runtimeJavaHome}/bin/java" + executable = "${buildParams.runtimeJavaHome.get()}/bin/java" args 'org.elasticsearch.cloud.gce.GCEFixture', baseDir, "${buildDir}/testclusters/yamlRestTest-1/config/unicast_hosts.txt" } diff --git a/plugins/examples/gradle/wrapper/gradle-wrapper.properties b/plugins/examples/gradle/wrapper/gradle-wrapper.properties index 6acc1431eaec1..22286c90de3d1 100644 --- a/plugins/examples/gradle/wrapper/gradle-wrapper.properties +++ b/plugins/examples/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=2ab88d6de2c23e6adae7363ae6e29cbdd2a709e992929b48b6530fd0c7133bd6 -distributionUrl=https\://services.gradle.org/distributions/gradle-8.10.2-all.zip +distributionSha256Sum=89d4e70e4e84e2d2dfbb63e4daa53e21b25017cc70c37e4eea31ee51fb15098a +distributionUrl=https\://services.gradle.org/distributions/gradle-8.11.1-all.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/plugins/mapper-annotated-text/build.gradle b/plugins/mapper-annotated-text/build.gradle index d0b1163970616..435ad83974efa 100644 --- a/plugins/mapper-annotated-text/build.gradle +++ b/plugins/mapper-annotated-text/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.internal.info.BuildParams - /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the "Elastic License @@ -8,20 +6,14 @@ import org.elasticsearch.gradle.internal.info.BuildParams * your election, the "Elastic License 2.0", the "GNU Affero General Public * License v3.0 only", or the "Server Side Public License, v 1". */ -apply plugin: 'elasticsearch.legacy-yaml-rest-test' -apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { description 'The Mapper Annotated_text plugin adds support for text fields with markup used to inject annotation tokens into the index.' 
classname 'org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextPlugin' } -if (BuildParams.isSnapshotBuild() == false) { - tasks.named("test").configure { - systemProperty 'es.index_mode_feature_flag_registered', 'true' - } -} - restResources { restApi { include '_common', 'indices', 'index', 'search' diff --git a/plugins/mapper-annotated-text/src/yamlRestTest/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextClientYamlTestSuiteIT.java b/plugins/mapper-annotated-text/src/yamlRestTest/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextClientYamlTestSuiteIT.java index afb23106bc101..68d141b6df840 100644 --- a/plugins/mapper-annotated-text/src/yamlRestTest/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextClientYamlTestSuiteIT.java +++ b/plugins/mapper-annotated-text/src/yamlRestTest/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextClientYamlTestSuiteIT.java @@ -12,8 +12,10 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; public class AnnotatedTextClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @@ -25,4 +27,12 @@ public AnnotatedTextClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate public static Iterable parameters() throws Exception { return createParameters(); } + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local().plugin("mapper-annotated-text").build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } } diff --git a/plugins/mapper-murmur3/build.gradle b/plugins/mapper-murmur3/build.gradle index 0fa710c130a29..0271296df934d 100644 --- a/plugins/mapper-murmur3/build.gradle +++ b/plugins/mapper-murmur3/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.internal.info.BuildParams - /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the "Elastic License @@ -8,8 +6,8 @@ import org.elasticsearch.gradle.internal.info.BuildParams * your election, the "Elastic License 2.0", the "GNU Affero General Public * License v3.0 only", or the "Server Side Public License, v 1". */ -apply plugin: 'elasticsearch.legacy-yaml-rest-test' -apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { description 'The Mapper Murmur3 plugin allows to compute hashes of a field\'s values at index-time and to store them in the index.' 
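
Both yaml suite changes in this patch (annotated-text above, murmur3 below) follow the same migration: the suite declares its own test cluster as a JUnit ClassRule and points the REST client at its HTTP addresses, instead of relying on a Gradle-managed legacy test cluster. A condensed sketch of the pattern follows; the plugin name is hypothetical, while the builder and override calls are the ones used verbatim in the two suites.

import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
import org.junit.ClassRule;

public class ExamplePluginClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

    // The suite owns its cluster: modules and plugins are loaded explicitly,
    // and the cluster starts before the first test and stops after the last.
    @ClassRule
    public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
        .module("lang-painless")   // a module dependency, as in the murmur3 suite
        .plugin("example-plugin")  // hypothetical plugin under test
        .build();

    public ExamplePluginClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
        super(testCandidate);
    }

    @ParametersFactory
    public static Iterable<Object[]> parameters() throws Exception {
        return createParameters();
    }

    @Override
    protected String getTestRestCluster() {
        return cluster.getHttpAddresses();
    }
}
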
@@ -20,12 +18,7 @@ esplugin { dependencies { compileOnly project(':modules:lang-painless:spi') testImplementation project(':modules:lang-painless') -} - -if (BuildParams.isSnapshotBuild() == false) { - tasks.named("test").configure { - systemProperty 'es.index_mode_feature_flag_registered', 'true' - } + clusterModules project(':modules:lang-painless') } restResources { @@ -33,8 +26,3 @@ restResources { include '_common', 'indices', 'index', 'search' } } - -testClusters.configureEach { - testDistribution = 'DEFAULT' - setting 'xpack.security.enabled', 'false' -} diff --git a/plugins/mapper-murmur3/src/yamlRestTest/java/org/elasticsearch/index/mapper/murmur3/MapperMurmur3ClientYamlTestSuiteIT.java b/plugins/mapper-murmur3/src/yamlRestTest/java/org/elasticsearch/index/mapper/murmur3/MapperMurmur3ClientYamlTestSuiteIT.java index d4b0f2e0dc6a7..399b488a5d2f7 100644 --- a/plugins/mapper-murmur3/src/yamlRestTest/java/org/elasticsearch/index/mapper/murmur3/MapperMurmur3ClientYamlTestSuiteIT.java +++ b/plugins/mapper-murmur3/src/yamlRestTest/java/org/elasticsearch/index/mapper/murmur3/MapperMurmur3ClientYamlTestSuiteIT.java @@ -12,8 +12,10 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; public class MapperMurmur3ClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @@ -25,4 +27,12 @@ public MapperMurmur3ClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate public static Iterable parameters() throws Exception { return createParameters(); } + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local().module("lang-painless").plugin("mapper-murmur3").build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } } diff --git a/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java b/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java index 3c47a43788b48..c2251910c3122 100644 --- a/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java +++ b/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java @@ -25,6 +25,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; @@ -136,14 +137,11 @@ public void testWildCardWithFieldsWhenDisabled() throws Exception { assertAcked(prepareCreate("test").setMapping("_size", "enabled=false")); final String source = "{\"f\":\"" + randomAlphaOfLengthBetween(1, 100) + "\"}"; indexRandom(true, prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); - assertResponse( + assertResponses( + response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")), prepareSearch("test").addFetchField("_size"), - response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) - ); - - assertResponse( 
prepareSearch("test").addFetchField("*"), - response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) + prepareSearch("test").addStoredField("*") ); assertResponse( @@ -156,19 +154,11 @@ public void testWildCardWithFieldsWhenNotProvided() throws Exception { assertAcked(prepareCreate("test")); final String source = "{\"f\":\"" + randomAlphaOfLengthBetween(1, 100) + "\"}"; indexRandom(true, prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); - assertResponse( + assertResponses( + response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")), prepareSearch("test").addFetchField("_size"), - response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) - ); - - assertResponse( prepareSearch("test").addFetchField("*"), - response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) - ); - - assertResponse( - prepareSearch("test").addStoredField("*"), - response -> assertNull(response.getHits().getHits()[0].getFields().get("_size")) + prepareSearch("test").addStoredField("*") ); } } diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 741542477e446..4da7c24de80f1 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -9,7 +9,6 @@ import org.elasticsearch.gradle.internal.test.RestIntegTestTask import org.elasticsearch.gradle.OS -import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.internal-yaml-rest-test' @@ -20,7 +19,7 @@ esplugin { } versions << [ - 'hadoop': '3.3.3' + 'hadoop': '3.4.1' ] configurations { @@ -42,9 +41,9 @@ dependencies { api "com.google.protobuf:protobuf-java:${versions.protobuf}" api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" - api 'commons-cli:commons-cli:1.2' + api 'commons-cli:commons-cli:1.5.0' api "commons-codec:commons-codec:${versions.commonscodec}" - api 'commons-io:commons-io:2.8.0' + api 'commons-io:commons-io:2.16.1' api 'org.apache.commons:commons-lang3:3.11' api 'javax.servlet:javax.servlet-api:3.1.0' api "org.slf4j:slf4j-api:${versions.slf4j}" @@ -58,14 +57,14 @@ dependencies { javaRestTestImplementation project(':test:fixtures:krb5kdc-fixture') javaRestTestImplementation "org.slf4j:slf4j-api:${versions.slf4j}" javaRestTestRuntimeOnly "com.google.guava:guava:16.0.1" - javaRestTestRuntimeOnly "commons-cli:commons-cli:1.2" + javaRestTestRuntimeOnly "commons-cli:commons-cli:1.5.0" javaRestTestRuntimeOnly "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" yamlRestTestCompileOnly(project(':test:fixtures:hdfs-fixture')) yamlRestTestImplementation project(':test:fixtures:krb5kdc-fixture') yamlRestTestImplementation "org.slf4j:slf4j-api:${versions.slf4j}" yamlRestTestRuntimeOnly "com.google.guava:guava:16.0.1" - yamlRestTestRuntimeOnly "commons-cli:commons-cli:1.2" + yamlRestTestRuntimeOnly "commons-cli:commons-cli:1.5.0" yamlRestTestRuntimeOnly "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" hdfsFixture2 project(path: ':test:fixtures:hdfs-fixture', configuration: 'shadowedHdfs2') @@ -84,7 +83,7 @@ tasks.named("dependencyLicenses").configure { tasks.withType(RestIntegTestTask).configureEach { usesDefaultDistribution() - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) jvmArgs '--add-exports', 'java.security.jgss/sun.security.krb5=ALL-UNNAMED' } @@ -178,7 +177,6 @@ 
tasks.named("thirdPartyAudit").configure { 'org.apache.hadoop.thirdparty.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', 'org.apache.hadoop.thirdparty.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', 'org.apache.hadoop.thirdparty.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', - 'org.apache.hadoop.thirdparty.com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3', 'org.apache.hadoop.thirdparty.com.google.common.hash.Striped64', 'org.apache.hadoop.thirdparty.com.google.common.hash.Striped64$1', 'org.apache.hadoop.thirdparty.com.google.common.hash.Striped64$Cell', @@ -189,6 +187,9 @@ tasks.named("thirdPartyAudit").configure { 'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil', 'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil$1', 'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil$JvmMemoryAccessor', - 'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil$MemoryAccessor' + 'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil$MemoryAccessor', + 'org.apache.hadoop.thirdparty.protobuf.MessageSchema', + 'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil$Android32MemoryAccessor', + 'org.apache.hadoop.thirdparty.protobuf.UnsafeUtil$Android64MemoryAccessor' ) } diff --git a/plugins/repository-hdfs/hadoop-client-api/build.gradle b/plugins/repository-hdfs/hadoop-client-api/build.gradle index 4ac6f79530fcb..24e4213780fe2 100644 --- a/plugins/repository-hdfs/hadoop-client-api/build.gradle +++ b/plugins/repository-hdfs/hadoop-client-api/build.gradle @@ -1,5 +1,5 @@ apply plugin: 'elasticsearch.build' -apply plugin: 'com.github.johnrengelman.shadow' +apply plugin: 'com.gradleup.shadow' dependencies { implementation "org.apache.hadoop:hadoop-client-api:${project.parent.versions.hadoop}" diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java index 98aa9951172ba..ce6acd79a0bb9 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java @@ -47,7 +47,8 @@ class HdfsSecurityContext { // 2) allow hadoop to add credentials to our Subject new AuthPermission("modifyPrivateCredentials"), // 3) RPC Engine requires this for re-establishing pooled connections over the lifetime of the client - new PrivateCredentialPermission("org.apache.hadoop.security.Credentials * \"*\"", "read") }; + new PrivateCredentialPermission("org.apache.hadoop.security.Credentials * \"*\"", "read"), + new RuntimePermission("getClassLoader") }; // If Security is enabled, we need all the following elevated permissions: KERBEROS_AUTH_PERMISSIONS = new Permission[] { diff --git a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle index 585124f223c9c..e63b1629db39c 100644 --- a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle +++ b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle @@ -8,7 +8,6 @@ */ import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-testclusters' @@ -16,7 +15,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.bwc-test' apply plugin: 'elasticsearch.rest-resources' -BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> 
+buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> /** * We execute tests 3 times. @@ -52,7 +51,7 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> nonInputProperties.systemProperty('tests.rest.remote_cluster', remoteCluster.map(c -> c.allHttpSocketURI.join(","))) } - onlyIf("FIPS mode disabled") { BuildParams.inFipsJvm == false } + onlyIf("FIPS mode disabled") { buildParams.inFipsJvm == false } } tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { diff --git a/qa/entitlements/build.gradle b/qa/entitlements/build.gradle index 2621d2731f411..9a5058a3b11ac 100644 --- a/qa/entitlements/build.gradle +++ b/qa/entitlements/build.gradle @@ -18,21 +18,8 @@ esplugin { classname 'org.elasticsearch.test.entitlements.EntitlementsCheckPlugin' } -configurations { - entitlementBridge { - canBeConsumed = false - } -} - dependencies { clusterPlugins project(':qa:entitlements') - entitlementBridge project(':libs:entitlement:bridge') -} - -tasks.named('javaRestTest') { - systemProperty "tests.entitlement-bridge.jar-name", configurations.entitlementBridge.singleFile.getName() - usesDefaultDistribution() - systemProperty "tests.security.manager", "false" } tasks.named("javadoc").configure { diff --git a/qa/entitlements/src/javaRestTest/java/org/elasticsearch/test/entitlements/EntitlementsIT.java b/qa/entitlements/src/javaRestTest/java/org/elasticsearch/test/entitlements/EntitlementsIT.java index a62add89c51e6..f8bae10492ba8 100644 --- a/qa/entitlements/src/javaRestTest/java/org/elasticsearch/test/entitlements/EntitlementsIT.java +++ b/qa/entitlements/src/javaRestTest/java/org/elasticsearch/test/entitlements/EntitlementsIT.java @@ -10,9 +10,7 @@ package org.elasticsearch.test.entitlements; import org.elasticsearch.client.Request; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.cluster.ElasticsearchCluster; -import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.ESRestTestCase; import org.junit.ClassRule; @@ -20,21 +18,13 @@ import static org.hamcrest.Matchers.containsString; -@ESTestCase.WithoutSecurityManager public class EntitlementsIT extends ESRestTestCase { - private static final String ENTITLEMENT_BRIDGE_JAR_NAME = System.getProperty("tests.entitlement-bridge.jar-name"); - @ClassRule public static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .distribution(DistributionType.INTEG_TEST) .plugin("entitlement-qa") .systemProperty("es.entitlements.enabled", "true") .setting("xpack.security.enabled", "false") - .jvmArg("-Djdk.attach.allowAttachSelf=true") - .jvmArg("-XX:+EnableDynamicAgentLoading") - .jvmArg("--patch-module=java.base=lib/entitlement-bridge/" + ENTITLEMENT_BRIDGE_JAR_NAME) - .jvmArg("--add-exports=java.base/org.elasticsearch.entitlement.bridge=org.elasticsearch.entitlement") .build(); @Override @@ -49,4 +39,11 @@ public void testCheckSystemExit() { ); assertThat(exception.getMessage(), containsString("not_entitled_exception")); } + + public void testCheckCreateURLClassLoader() { + var exception = expectThrows(IOException.class, () -> { + client().performRequest(new Request("GET", "/_entitlement/_check_create_url_classloader")); + }); + assertThat(exception.getMessage(), containsString("not_entitled_exception")); + } } diff --git a/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/EntitlementsCheckPlugin.java b/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/EntitlementsCheckPlugin.java index 
f3821c065eceb..94ad54c8c8ba8 100644 --- a/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/EntitlementsCheckPlugin.java +++ b/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/EntitlementsCheckPlugin.java @@ -22,7 +22,6 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; -import java.util.Collections; import java.util.List; import java.util.function.Predicate; import java.util.function.Supplier; @@ -42,6 +41,6 @@ public List getRestHandlers( final Supplier nodesInCluster, Predicate clusterSupportsFeature ) { - return Collections.singletonList(new RestEntitlementsCheckSystemExitAction()); + return List.of(new RestEntitlementsCheckSystemExitAction(), new RestEntitlementsCheckClassLoaderAction()); } } diff --git a/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/RestEntitlementsCheckClassLoaderAction.java b/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/RestEntitlementsCheckClassLoaderAction.java new file mode 100644 index 0000000000000..0b5ca28739ed0 --- /dev/null +++ b/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/RestEntitlementsCheckClassLoaderAction.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.test.entitlements; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; + +import java.net.URL; +import java.net.URLClassLoader; +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +public class RestEntitlementsCheckClassLoaderAction extends BaseRestHandler { + + private static final Logger logger = LogManager.getLogger(RestEntitlementsCheckClassLoaderAction.class); + + RestEntitlementsCheckClassLoaderAction() {} + + @Override + public List routes() { + return List.of(new Route(GET, "/_entitlement/_check_create_url_classloader")); + } + + @Override + public String getName() { + return "check_classloader_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + logger.info("RestEntitlementsCheckClassLoaderAction rest handler [{}]", request.path()); + if (request.path().equals("/_entitlement/_check_create_url_classloader")) { + return channel -> { + logger.info("Calling new URLClassLoader"); + try (var classLoader = new URLClassLoader("test", new URL[0], this.getClass().getClassLoader())) { + logger.info("Created URLClassLoader [{}]", classLoader.getName()); + } + }; + } + + throw new UnsupportedOperationException(); + } +} diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index 8d950eea616d6..5e68c4d1ad26b 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -7,14 +7,13 @@ * License v3.0 only", or the "Server Side Public License, v 1". 
*/ -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.internal-test-artifact' apply plugin: 'elasticsearch.bwc-test' -BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> +buildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartArchivedSettingsIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartArchivedSettingsIT.java new file mode 100644 index 0000000000000..caa57f1e605a2 --- /dev/null +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartArchivedSettingsIT.java @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.upgrades; + +import io.netty.handler.codec.http.HttpMethod; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.WarningsHandler; +import org.elasticsearch.core.UpdateForV10; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; +import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.rest.ObjectPath; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TemporaryFolder; +import org.junit.rules.TestRule; + +import static org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.THRESHOLD_SETTING; + +/** + * Tests to run before and after a full cluster restart. This is run twice, + * one with {@code tests.is_old_cluster} set to {@code true} against a cluster + * of an older version. The cluster is shutdown and a cluster of the new + * version is started with the same data directories and then this is rerun + * with {@code tests.is_old_cluster} set to {@code false}. 
+ */ +public class FullClusterRestartArchivedSettingsIT extends ParameterizedFullClusterRestartTestCase { + + private static TemporaryFolder repoDirectory = new TemporaryFolder(); + + protected static LocalClusterConfigProvider clusterConfig = c -> {}; + + private static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(getOldClusterTestVersion()) + .nodes(2) + .setting("path.repo", () -> repoDirectory.getRoot().getPath()) + .setting("xpack.security.enabled", "false") + // some tests rely on the translog not being flushed + .setting("indices.memory.shard_inactive_time", "60m") + .apply(() -> clusterConfig) + .feature(FeatureFlag.TIME_SERIES_MODE) + .feature(FeatureFlag.FAILURE_STORE_ENABLED) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(repoDirectory).around(cluster); + + public FullClusterRestartArchivedSettingsIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + + @Override + protected ElasticsearchCluster getUpgradeCluster() { + return cluster; + } + + @UpdateForV10(owner = UpdateForV10.Owner.DISTRIBUTED_COORDINATION) // this test is just about v8->v9 upgrades, remove it in v10 + public void testBalancedShardsAllocatorThreshold() throws Exception { + assumeTrue("test only applies for v8->v9 upgrades", getOldClusterTestVersion().getMajor() == 8); + + final var chosenValue = randomFrom("0", "0.1", "0.5", "0.999"); + + if (isRunningAgainstOldCluster()) { + final var request = newXContentRequest( + HttpMethod.PUT, + "/_cluster/settings", + (builder, params) -> builder.startObject("persistent").field(THRESHOLD_SETTING.getKey(), chosenValue).endObject() + ); + request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE)); + assertOK(client().performRequest(request)); + } + + final var clusterSettingsResponse = ObjectPath.createFromResponse( + client().performRequest(new Request("GET", "/_cluster/settings")) + ); + + final var settingsPath = "persistent." + THRESHOLD_SETTING.getKey(); + final var settingValue = clusterSettingsResponse.evaluate(settingsPath); + + if (isRunningAgainstOldCluster()) { + assertEquals(chosenValue, settingValue); + } else { + assertNull(settingValue); + assertNotNull(clusterSettingsResponse.evaluate("persistent.archived." 
+ THRESHOLD_SETTING.getKey())); + } +}
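For context on the assertions above: when the upgraded cluster no longer understands a persisted setting, the master keeps it under an archived. prefix instead of applying it. A short, hypothetical sketch (not part of this change) of how such leftovers could then be inspected and cleared over REST, assuming the documented archived.* wildcard reset and the same test helpers used in the test above:

// Hypothetical follow-up, not part of this change: archived settings surface
// under "persistent" with an "archived." prefix and can be removed with a
// wildcard reset. client(), entityAsMap() and assertOK() are the test helpers
// already used above.
Map<String, Object> settings = entityAsMap(client().performRequest(new Request("GET", "/_cluster/settings")));
// e.g. a key like "archived.cluster.routing.allocation.balance.threshold" under "persistent"
assertNotNull(settings.get("persistent"));
Request clearArchived = new Request("PUT", "/_cluster/settings");
clearArchived.setJsonEntity("{\"persistent\": {\"archived.*\": null}}");
assertOK(client().performRequest(clearArchived));

diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartDownsampleIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartDownsampleIT.java index 3a983dbd058df..d98d53baf9015 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartDownsampleIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartDownsampleIT.java @@ -18,7 +18,6 @@ import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.junit.Before; import org.junit.ClassRule; import org.junit.rules.RuleChain; @@ -269,10 +268,7 @@ private String getRollupIndexName() throws IOException { } public void testRollupIndex() throws Exception { - assumeTrue( - "Downsample got many stability improvements in 8.10.0", - oldClusterHasFeature(RestTestLegacyFeatures.TSDB_DOWNSAMPLING_STABLE) - ); + assumeTrue("Downsample got many stability improvements in 8.10.0", oldClusterHasFeature("gte_v8.10.0")); if (isRunningAgainstOldCluster()) { createIlmPolicy(); createIndex(); diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index fcca3f9a4700c..0f41712abe927 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -16,11 +16,9 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.Build; import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.WarningsHandler; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.common.Strings; @@ -29,7 +27,6 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -44,7 +41,6 @@ import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; @@ -75,12 +71,12 @@ import static java.util.stream.Collectors.toList; import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION; import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; -import static org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.THRESHOLD_SETTING; import static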
org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -90,6 +86,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; /** * Tests to run before and after a full cluster restart. This is run twice, @@ -264,7 +261,7 @@ public void testNewReplicas() throws Exception { } public void testSearchTimeSeriesMode() throws Exception { - assumeTrue("indexing time series indices changed in 8.2.0", oldClusterHasFeature(RestTestLegacyFeatures.TSDB_NEW_INDEX_FORMAT)); + assumeTrue("indexing time series indices changed in 8.2.0", oldClusterHasFeature("gte_v8.2.0")); int numDocs; if (isRunningAgainstOldCluster()) { numDocs = createTimeSeriesModeIndex(1); @@ -302,7 +299,7 @@ public void testSearchTimeSeriesMode() throws Exception { } public void testNewReplicasTimeSeriesMode() throws Exception { - assumeTrue("indexing time series indices changed in 8.2.0", oldClusterHasFeature(RestTestLegacyFeatures.TSDB_NEW_INDEX_FORMAT)); + assumeTrue("indexing time series indices changed in 8.2.0", oldClusterHasFeature("gte_v8.2.0")); if (isRunningAgainstOldCluster()) { createTimeSeriesModeIndex(0); } else { @@ -1277,12 +1274,16 @@ private void checkSnapshot(String snapshotName, int count, String tookOnVersion, assertEquals(singletonList(snapshotName), XContentMapValues.extractValue("snapshots.snapshot", snapResponse)); assertEquals(singletonList("SUCCESS"), XContentMapValues.extractValue("snapshots.state", snapResponse)); // the format can change depending on the ES node version running & this test code running + // and if there's an in-progress release that hasn't been published yet, + // which could affect the top range of the index release version + String firstReleaseVersion = tookOnIndexVersion.toReleaseVersion().split("-")[0]; assertThat( - XContentMapValues.extractValue("snapshots.version", snapResponse), + (Iterable) XContentMapValues.extractValue("snapshots.version", snapResponse), anyOf( - equalTo(List.of(tookOnVersion)), - equalTo(List.of(tookOnIndexVersion.toString())), - equalTo(List.of(tookOnIndexVersion.toReleaseVersion())) + contains(tookOnVersion), + contains(tookOnIndexVersion.toString()), + contains(firstReleaseVersion), + contains(startsWith(firstReleaseVersion + "-")) ) ); @@ -1953,35 +1954,4 @@ public static void assertNumHits(String index, int numHits, int totalShards) thr assertThat(XContentMapValues.extractValue("_shards.successful", resp), equalTo(totalShards)); assertThat(extractTotalHits(resp), equalTo(numHits)); } - - @UpdateForV10(owner = UpdateForV10.Owner.DISTRIBUTED_COORDINATION) // this test is just about v8->v9 upgrades, remove it in v10 - public void testBalancedShardsAllocatorThreshold() throws Exception { - assumeTrue("test only applies for v8->v9 upgrades", getOldClusterTestVersion().getMajor() == 8); - - final var chosenValue = randomFrom("0", "0.1", "0.5", "0.999"); - - if (isRunningAgainstOldCluster()) { - final var request = newXContentRequest( - HttpMethod.PUT, - 
"/_cluster/settings", - (builder, params) -> builder.startObject("persistent").field(THRESHOLD_SETTING.getKey(), chosenValue).endObject() - ); - request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE)); - assertOK(client().performRequest(request)); - } - - final var clusterSettingsResponse = ObjectPath.createFromResponse( - client().performRequest(new Request("GET", "/_cluster/settings")) - ); - - final var settingsPath = "persistent." + THRESHOLD_SETTING.getKey(); - final var settingValue = clusterSettingsResponse.evaluate(settingsPath); - - if (isRunningAgainstOldCluster()) { - assertEquals(chosenValue, settingValue); - } else { - assertNull(settingValue); - assertNotNull(clusterSettingsResponse.evaluate("persistent.archived." + THRESHOLD_SETTING.getKey())); - } - } } diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeFullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeFullClusterRestartIT.java index f1f4fcf091e8f..9866d94dccc3c 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeFullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeFullClusterRestartIT.java @@ -17,10 +17,8 @@ import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.FormatNames; -import org.elasticsearch.test.MapMatcher; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.DistributionType; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.hamcrest.Matcher; import org.hamcrest.Matchers; import org.junit.ClassRule; @@ -31,9 +29,6 @@ import java.util.Map; import java.util.function.Supplier; -import static org.elasticsearch.test.MapMatcher.assertMap; -import static org.elasticsearch.test.MapMatcher.matchesMap; - public class LogsIndexModeFullClusterRestartIT extends ParameterizedFullClusterRestartTestCase { @ClassRule @@ -125,8 +120,6 @@ protected ElasticsearchCluster getUpgradeCluster() { }"""; public void testLogsIndexing() throws IOException { - assumeTrue("Test uses data streams", oldClusterHasFeature(RestTestLegacyFeatures.DATA_STREAMS_SUPPORTED)); - if (isRunningAgainstOldCluster()) { assertOK(client().performRequest(putTemplate(client(), "logs-template", STANDARD_TEMPLATE))); assertOK(client().performRequest(createDataStream("logs-apache-production"))); @@ -172,22 +165,16 @@ public void testLogsIndexing() throws IOException { assertOK(bulkIndexResponse); assertThat(entityAsMap(bulkIndexResponse).get("errors"), Matchers.is(false)); - assertIndexMappingsAndSettings(0, Matchers.nullValue(), matchesMap().extraOk()); - assertIndexMappingsAndSettings( - 1, - Matchers.equalTo("logsdb"), - matchesMap().extraOk().entry("_source", Map.of("mode", "synthetic")) - ); + assertIndexSettings(0, Matchers.nullValue()); + assertIndexSettings(1, Matchers.equalTo("logsdb")); } } - private void assertIndexMappingsAndSettings(int backingIndex, final Matcher indexModeMatcher, final MapMatcher mappingsMatcher) - throws IOException { + private void assertIndexSettings(int backingIndex, final Matcher indexModeMatcher) throws IOException { assertThat( getSettings(client(), getWriteBackingIndex(client(), "logs-apache-production", backingIndex)).get("index.mode"), indexModeMatcher ); - 
assertMap(getIndexMappingAsMap(getWriteBackingIndex(client(), "logs-apache-production", backingIndex)), mappingsMatcher); } private static Request createDataStream(final String dataStreamName) { diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index 9ca420efe1156..aac2c661dea9f 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -12,8 +12,6 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import org.elasticsearch.TransportVersion; -import org.elasticsearch.TransportVersions; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; @@ -23,7 +21,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.ConstantScoreQueryBuilder; import org.elasticsearch.index.query.DisMaxQueryBuilder; @@ -43,7 +40,6 @@ import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.xcontent.XContentBuilder; import org.junit.ClassRule; @@ -249,23 +245,10 @@ public void testQueryBuilderBWC() throws Exception { InputStream in = new ByteArrayInputStream(qbSource, 0, qbSource.length); StreamInput input = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(in), registry) ) { - - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_FOUNDATIONS) // condition will always be true - var originalClusterHasTransportVersion = oldClusterHasFeature(RestTestLegacyFeatures.TRANSPORT_VERSION_SUPPORTED); - final TransportVersion transportVersion; - if (originalClusterHasTransportVersion == false) { - transportVersion = TransportVersion.fromId( - parseLegacyVersion(getOldClusterVersion()).map(Version::id).orElse(TransportVersions.MINIMUM_COMPATIBLE.id()) - ); - } else { - transportVersion = TransportVersion.readVersion(input); - } - - input.setTransportVersion(transportVersion); + input.setTransportVersion(TransportVersion.readVersion(input)); QueryBuilder queryBuilder = input.readNamedWriteable(QueryBuilder.class); assert in.read() == -1; assertEquals(expectedQueryBuilder, queryBuilder); - } } } diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index f3fd57f3fc8ae..d8f906b23d523 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -10,8 +10,11 @@ import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask +import org.elasticsearch.gradle.testclusters.TestClusterValueSource +import org.elasticsearch.gradle.testclusters.TestClustersRegistry +import org.elasticsearch.gradle.util.GradleUtils +import org.elasticsearch.gradle.testclusters.TestClustersPlugin apply plugin: 'elasticsearch.internal-testclusters' apply plugin: 
'elasticsearch.standalone-rest-test' @@ -64,8 +67,9 @@ excludeList.add('indices.resolve_index/20_resolve_system_index/*') // Excluded because the error has changed excludeList.add('aggregations/percentiles_hdr_metric/Negative values test') -BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> +def clusterPath = getPath() +buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> if (bwcVersion != VersionProperties.getElasticsearchVersion()) { /* This project runs the core REST tests against a 4 node cluster where two of the nodes has a different minor. */ @@ -86,18 +90,42 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> tasks.register("${baseName}#mixedClusterTest", StandaloneRestIntegTestTask) { useCluster baseCluster mustRunAfter("precommit") + Provider serviceProvider = GradleUtils.getBuildService( + project.gradle.sharedServices, + TestClustersPlugin.REGISTRY_SERVICE_NAME + ) + + def baseInfo = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set(baseName) + it.parameters.service = serviceProvider + }.map { it.getAllHttpSocketURI() } + + def baseInfoAfterOneNodeUpdate = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set(baseName) + it.parameters.service = serviceProvider + }.map { it.getAllHttpSocketURI() } + + def baseInfoAfterTwoNodesUpdate = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set(baseName) + it.parameters.service = serviceProvider + }.map { it.getAllHttpSocketURI() } + def nonInputProps = nonInputProperties + def sharedRepoFolder = new File(buildDir, "cluster/shared/repo/${baseName}") doFirst { - delete("${buildDir}/cluster/shared/repo/${baseName}") + delete(sharedRepoFolder) // Getting the endpoints causes a wait for the cluster - println "Test cluster endpoints are: ${-> baseCluster.get().allHttpSocketURI.join(",")}" + println "Test cluster endpoints are: ${-> baseInfo.get().join(",")}" println "Upgrading one node to create a mixed cluster" baseCluster.get().nextNodeToNextVersion() // Getting the endpoints causes a wait for the cluster - println "Upgrade complete, endpoints are: ${-> baseCluster.get().allHttpSocketURI.join(",")}" + println "Upgrade complete, endpoints are: ${-> baseInfoAfterOneNodeUpdate.get().join(",")}" println "Upgrading another node to create a mixed cluster" baseCluster.get().nextNodeToNextVersion() - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) - nonInputProperties.systemProperty('tests.clustername', baseName) + nonInputProps.systemProperty('tests.rest.cluster', baseInfoAfterTwoNodesUpdate.map(c -> c.join(","))) + nonInputProps.systemProperty('tests.clustername', baseName) if (excludeList.isEmpty() == false) { systemProperty 'tests.rest.blacklist', excludeList.join(',') } @@ -105,7 +133,7 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}" systemProperty 'tests.bwc_nodes_version', bwcVersion.toString().replace('-SNAPSHOT', '') systemProperty 'tests.new_nodes_version', project.version.toString().replace('-SNAPSHOT', '') - onlyIf("BWC tests disabled") { project.bwc_tests_enabled } +// onlyIf("BWC tests disabled") { project.bwc_tests_enabled } } tasks.register(bwcTaskName(bwcVersion)) { diff --git 
a/qa/multi-cluster-search/build.gradle b/qa/multi-cluster-search/build.gradle index 146acedd164b2..d46bf3f18f8cc 100644 --- a/qa/multi-cluster-search/build.gradle +++ b/qa/multi-cluster-search/build.gradle @@ -15,7 +15,6 @@ import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.RestIntegTestTask apply plugin: 'elasticsearch.internal-testclusters' @@ -35,7 +34,7 @@ def ccsSupportedVersion = bwcVersion -> { return currentVersion.minor == 0 || (currentVersion.major == bwcVersion.major && currentVersion.minor - bwcVersion.minor <= 1) } -BuildParams.bwcVersions.withWireCompatible(ccsSupportedVersion) { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible(ccsSupportedVersion) { bwcVersion, baseName -> def remoteCluster = testClusters.register("${baseName}-remote") { numberOfNodes = 2 diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DebMetadataTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DebMetadataTests.java index a60e58c34918b..9f9aa78a4910b 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DebMetadataTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DebMetadataTests.java @@ -12,18 +12,31 @@ import junit.framework.TestCase; import org.elasticsearch.packaging.util.Distribution; -import org.elasticsearch.packaging.util.FileUtils; +import org.elasticsearch.packaging.util.LintianResultParser; +import org.elasticsearch.packaging.util.LintianResultParser.Issue; +import org.elasticsearch.packaging.util.LintianResultParser.Result; import org.elasticsearch.packaging.util.Shell; import org.junit.BeforeClass; +import java.util.List; import java.util.Locale; import java.util.regex.Pattern; +import java.util.stream.Collectors; import static org.elasticsearch.packaging.util.FileUtils.getDistributionFile; import static org.junit.Assume.assumeTrue; public class DebMetadataTests extends PackagingTestCase { + private final LintianResultParser lintianParser = new LintianResultParser(); + private static final List IGNORED_TAGS = List.of( + // Override syntax changes between lintian versions in a non-backwards compatible way, so we have to tolerate these. + // Tag mismatched-override is a non-erasable tag which cannot be ignored with overrides, so we handle it here. + "mismatched-override", + // systemd-service-file-outside-lib has been incorrect and removed in the newer version on Lintian + "systemd-service-file-outside-lib" + ); + @BeforeClass public static void filterDistros() { assumeTrue("only deb", distribution.packaging == Distribution.Packaging.DEB); @@ -35,15 +48,26 @@ public void test05CheckLintian() { if (helpText.contains("--fail-on-warnings")) { extraArgs = "--fail-on-warnings"; } else if (helpText.contains("--fail-on error")) { - extraArgs = "--fail-on warning"; - // Recent lintian versions are picky about malformed or mismatched overrides. - // Unfortunately override syntax changes between lintian versions in a non-backwards compatible - // way, so we have to tolerate these (or maintain separate override files per lintian version). 
- if (helpText.contains("--suppress-tags")) { - extraArgs += " --suppress-tags malformed-override,mismatched-override"; + extraArgs = "--fail-on error,warning"; + } + Shell.Result result = sh.runIgnoreExitCode( + String.format(Locale.ROOT, "lintian %s %s", extraArgs, getDistributionFile(distribution())) + ); + Result lintianResult = lintianParser.parse(result.stdout()); + // Unfortunately Lintian overrides syntax changes between Lintian versions in a non-backwards compatible + // way, so we have to manage some exclusions outside the overrides file. + if (lintianResult.isSuccess() == false) { + List<Issue> importantIssues = lintianResult.issues() + .stream() + .filter(issue -> IGNORED_TAGS.contains(issue.tag()) == false) + .toList(); + if (importantIssues.isEmpty() == false) { + fail( + "Issues for DEB package found by Lintian:\n" + + importantIssues.stream().map(Record::toString).collect(Collectors.joining("\n")) + ); } } - sh.run(String.format(Locale.ROOT, "lintian %s %s", extraArgs, FileUtils.getDistributionFile(distribution()))); } public void test06Dependencies() { diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java index 8cb8354eb5d71..3ad4c247a8b9b 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java @@ -96,11 +96,10 @@ /** * This class tests the Elasticsearch Docker images. We have several:
 * <ul>
- *     <li>The default image with a custom, small base image</li>
- *     <li>A UBI-based image</li>
+ *     <li>The default image UBI-based image</li>
 *     <li>Another UBI image for Iron Bank</li>
 *     <li>A WOLFI-based image</li>
- *     <li>Images for Cloud</li>
+ *     <li>Image for Cloud</li>
 * </ul>
*/ @ThreadLeakFilters(defaultFilters = true, filters = { HttpClientThreadsFilter.class }) @@ -383,15 +382,14 @@ public void test026InstallBundledRepositoryPluginsViaConfigFile() { public void test040JavaUsesTheOsProvidedKeystore() { final String path = sh.run("realpath jdk/lib/security/cacerts").stdout(); - if (distribution.packaging == Packaging.DOCKER_UBI || distribution.packaging == Packaging.DOCKER_IRON_BANK) { + if (distribution.packaging == Packaging.DOCKER || distribution.packaging == Packaging.DOCKER_IRON_BANK) { // In these images, the `cacerts` file ought to be a symlink here assertThat(path, equalTo("/etc/pki/ca-trust/extracted/java/cacerts")); } else if (distribution.packaging == Packaging.DOCKER_WOLFI || distribution.packaging == Packaging.DOCKER_CLOUD_ESS) { // In these images, the `cacerts` file ought to be a symlink here assertThat(path, equalTo("/etc/ssl/certs/java/cacerts")); } else { - // Whereas on other images, it's a real file so the real path is the same - assertThat(path, equalTo("/usr/share/elasticsearch/jdk/lib/security/cacerts")); + fail("Unknown distribution: " + distribution.packaging); } } @@ -1126,25 +1124,25 @@ public void test171AdditionalCliOptionsAreForwarded() throws Exception { } /** - * Check that the UBI images has the correct license information in the correct place. + * Check that the Docker images have the correct license information in the correct place. */ - public void test200UbiImagesHaveLicenseDirectory() { - assumeTrue(distribution.packaging == Packaging.DOCKER_UBI); + public void test200ImagesHaveLicenseDirectory() { + assumeTrue(distribution.packaging != Packaging.DOCKER_IRON_BANK); final String[] files = sh.run("find /licenses -type f").stdout().split("\n"); assertThat(files, arrayContaining("/licenses/LICENSE")); // UBI image doesn't contain `diff` - final String ubiLicense = sh.run("cat /licenses/LICENSE").stdout(); + final String imageLicense = sh.run("cat /licenses/LICENSE").stdout(); final String distroLicense = sh.run("cat /usr/share/elasticsearch/LICENSE.txt").stdout(); - assertThat(ubiLicense, equalTo(distroLicense)); + assertThat(imageLicense, equalTo(distroLicense)); } /** - * Check that the UBI image has the expected labels + * Check that the images have the expected labels */ - public void test210UbiLabels() throws Exception { - assumeTrue(distribution.packaging == Packaging.DOCKER_UBI); + public void test210Labels() throws Exception { + assumeTrue(distribution.packaging != Packaging.DOCKER_IRON_BANK); final Map<String, String> labels = getImageLabels(distribution); diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java index 02e1ce35764cf..a47dd0e57642e 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java @@ -436,7 +436,7 @@ private void verifyKeystorePermissions() { switch (distribution.packaging) { case TAR, ZIP -> assertThat(keystore, file(File, ARCHIVE_OWNER, ARCHIVE_OWNER, p660)); case DEB, RPM -> assertThat(keystore, file(File, "root", "elasticsearch", p660)); - case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> assertThat(keystore, DockerFileMatcher.file(p660)); + case DOCKER, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> assertThat(keystore, DockerFileMatcher.file(p660)); default -> throw new IllegalStateException("Unknown 
Elasticsearch packaging type."); } } diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java index b4a00ca56924a..a157cc84e624e 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java @@ -245,7 +245,7 @@ protected static void install() throws Exception { installation = Packages.installPackage(sh, distribution); Packages.verifyPackageInstallation(installation, distribution, sh); } - case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> { + case DOCKER, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> { installation = Docker.runContainer(distribution); Docker.verifyContainerInstallation(installation); } @@ -333,7 +333,6 @@ public Shell.Result runElasticsearchStartCommand(String password, boolean daemon case RPM: return Packages.runElasticsearchStartCommand(sh); case DOCKER: - case DOCKER_UBI: case DOCKER_IRON_BANK: case DOCKER_CLOUD_ESS: case DOCKER_WOLFI: @@ -355,7 +354,6 @@ public void stopElasticsearch() throws Exception { Packages.stopElasticsearch(sh); break; case DOCKER: - case DOCKER_UBI: case DOCKER_IRON_BANK: case DOCKER_CLOUD_ESS: case DOCKER_WOLFI: @@ -371,7 +369,7 @@ public void awaitElasticsearchStartup(Shell.Result result) throws Exception { switch (distribution.packaging) { case TAR, ZIP -> Archives.assertElasticsearchStarted(installation); case DEB, RPM -> Packages.assertElasticsearchStarted(sh, installation); - case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> Docker.waitForElasticsearchToStart(); + case DOCKER, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> Docker.waitForElasticsearchToStart(); default -> throw new IllegalStateException("Unknown Elasticsearch packaging type."); } } diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Distribution.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Distribution.java index 11b8324384631..55c59db6219d3 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Distribution.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Distribution.java @@ -29,8 +29,6 @@ public Distribution(Path path) { this.packaging = Packaging.TAR; } else if (filename.endsWith(".docker.tar")) { this.packaging = Packaging.DOCKER; - } else if (filename.endsWith(".ubi.tar")) { - this.packaging = Packaging.DOCKER_UBI; } else if (filename.endsWith(".ironbank.tar")) { this.packaging = Packaging.DOCKER_IRON_BANK; } else if (filename.endsWith(".cloud-ess.tar")) { @@ -61,7 +59,7 @@ public boolean isPackage() { */ public boolean isDocker() { return switch (packaging) { - case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> true; + case DOCKER, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> true; default -> false; }; } @@ -73,7 +71,6 @@ public enum Packaging { DEB(".deb", Platforms.isDPKG()), RPM(".rpm", Platforms.isRPM()), DOCKER(".docker.tar", Platforms.isDocker()), - DOCKER_UBI(".ubi.tar", Platforms.isDocker()), DOCKER_IRON_BANK(".ironbank.tar", Platforms.isDocker()), DOCKER_CLOUD_ESS(".cloud-ess.tar", Platforms.isDocker()), DOCKER_WOLFI(".wolfi.tar", Platforms.isDocker()); diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/LintianResultParser.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/LintianResultParser.java new 
file mode 100644 index 0000000000000..511080427ea77 --- /dev/null +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/LintianResultParser.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.packaging.util; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class LintianResultParser { + + private static final Logger logger = LogManager.getLogger(LintianResultParser.class); + private static final Pattern RESULT_PATTERN = Pattern.compile("(?<severity>[EW]): (?<package>\\S+): (?<tag>\\S+) (?<message>.+)"); + + public Result parse(String output) { + String[] lines = output.split("\n"); + List<Issue> issues = Arrays.stream(lines).map(line -> { + Matcher matcher = RESULT_PATTERN.matcher(line); + if (matcher.matches() == false) { + logger.info("Lintian output not matching expected pattern: {}", line); + return null; + } + Severity severity = switch (matcher.group("severity")) { + case "E" -> Severity.ERROR; + case "W" -> Severity.WARNING; + default -> Severity.UNKNOWN; + }; + return new Issue(severity, matcher.group("tag"), matcher.group("message")); + }).filter(Objects::nonNull).toList(); + + return new Result(issues.stream().noneMatch(it -> it.severity == Severity.ERROR || it.severity == Severity.WARNING), issues); + } + + public record Result(boolean isSuccess, List<Issue> issues) {} + + public record Issue(Severity severity, String tag, String message) {} + + enum Severity { + ERROR, + WARNING, + UNKNOWN + } +}
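A minimal usage sketch for the parser above (hypothetical, not part of this change; the sample lintian line and the asserted values are invented, but follow the "severity: package: tag message" shape that RESULT_PATTERN expects):

// Hypothetical example. A single ERROR-severity issue is parsed, so the
// overall result is a failure; Issue.tag() is the value DebMetadataTests
// matches against IGNORED_TAGS.
LintianResultParser parser = new LintianResultParser();
LintianResultParser.Result result = parser.parse("E: elasticsearch: some-lintian-tag additional detail here");
assert result.isSuccess() == false;
assert result.issues().get(0).tag().equals("some-lintian-tag");

diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java index e3eac23d3ecce..5dc47993072a8 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java @@ -163,7 +163,6 @@ String build() { public static String getImageName(Distribution distribution) { String suffix = switch (distribution.packaging) { case DOCKER -> ""; - case DOCKER_UBI -> "-ubi"; case DOCKER_IRON_BANK -> "-ironbank"; case DOCKER_CLOUD_ESS -> "-cloud-ess"; case DOCKER_WOLFI -> "-wolfi"; diff --git a/qa/repository-multi-version/build.gradle b/qa/repository-multi-version/build.gradle index 17888efaa2b49..646a7974868c4 100644 --- a/qa/repository-multi-version/build.gradle +++ b/qa/repository-multi-version/build.gradle @@ -7,7 +7,6 @@ * License v3.0 only", or the "Server Side Public License, v 1". 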
*/ -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-testclusters' @@ -16,7 +15,7 @@ apply plugin: 'elasticsearch.internal-test-artifact' apply plugin: 'elasticsearch.bwc-test' -BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> +buildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> String oldClusterName = "${baseName}-old" String newClusterName = "${baseName}-new" diff --git a/qa/rolling-upgrade-legacy/build.gradle b/qa/rolling-upgrade-legacy/build.gradle index 4ebb3888e9f20..e1c31fd50c0d4 100644 --- a/qa/rolling-upgrade-legacy/build.gradle +++ b/qa/rolling-upgrade-legacy/build.gradle @@ -10,7 +10,6 @@ import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-testclusters' @@ -18,7 +17,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.bwc-test' apply plugin: 'elasticsearch.rest-resources' -BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> /* * NOTE: This module is for the tests that were problematic when converting :qa:rolling-upgrade to the junit-based bwc test definition * Over time, these should be migrated into the :qa:rolling-upgrade module and fixed properly diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index ef31f6421c187..1d7475427b33b 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -7,7 +7,6 @@ * License v3.0 only", or the "Server Side Public License, v 1". 
*/ -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-java-rest-test' @@ -18,7 +17,7 @@ testArtifacts { registerTestArtifactFromSourceSet(sourceSets.javaRestTest) } -BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java index e0d1e7aafa637..d9adec47ff483 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java @@ -14,14 +14,12 @@ import org.elasticsearch.Build; import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesRequest; import org.elasticsearch.client.Request; -import org.elasticsearch.client.ResponseException; import org.elasticsearch.cluster.metadata.DesiredNode; import org.elasticsearch.cluster.metadata.DesiredNodeWithStatus; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; @@ -43,24 +41,7 @@ public DesiredNodesUpgradeIT(@Name("upgradedNodes") int upgradedNodes) { desiredNodesVersion = upgradedNodes + 1; } - private enum ProcessorsPrecision { - DOUBLE, - FLOAT - } - public void testUpgradeDesiredNodes() throws Exception { - assumeTrue("Desired nodes was introduced in 8.1", oldClusterHasFeature(RestTestLegacyFeatures.DESIRED_NODE_API_SUPPORTED)); - - if (oldClusterHasFeature(DesiredNode.DOUBLE_PROCESSORS_SUPPORTED)) { - assertUpgradedNodesCanReadDesiredNodes(); - } else if (oldClusterHasFeature(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORTED)) { - assertDesiredNodesUpdatedWithRoundedUpFloatsAreIdempotent(); - } else { - assertDesiredNodesWithFloatProcessorsAreRejectedInOlderVersions(); - } - } - - private void assertUpgradedNodesCanReadDesiredNodes() throws Exception { if (isMixedCluster() || isUpgradedCluster()) { final Map desiredNodes = getLatestDesiredNodes(); final String historyId = extractValue(desiredNodes, "history_id"); @@ -69,60 +50,10 @@ private void assertUpgradedNodesCanReadDesiredNodes() throws Exception { assertThat(version, is(equalTo(desiredNodesVersion - 1))); } - addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(desiredNodesVersion, ProcessorsPrecision.DOUBLE); + addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(desiredNodesVersion); assertAllDesiredNodesAreActualized(); } - private void assertDesiredNodesUpdatedWithRoundedUpFloatsAreIdempotent() throws Exception { - // We define the same set of desired nodes to ensure that they are equal across all - // the test runs, otherwise we cannot guarantee an idempotent update in this test - final var desiredNodes = getNodeNames().stream() - .map( - nodeName -> new DesiredNode( - Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), - 1238.49922909, - 
ByteSizeValue.ofGb(32), - ByteSizeValue.ofGb(128), - clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() - ) - ) - .toList(); - - if (isMixedCluster()) { - updateDesiredNodes(desiredNodes, desiredNodesVersion - 1); - } - for (int i = 0; i < 2; i++) { - updateDesiredNodes(desiredNodes, desiredNodesVersion); - } - - final Map latestDesiredNodes = getLatestDesiredNodes(); - final int latestDesiredNodesVersion = extractValue(latestDesiredNodes, "version"); - assertThat(latestDesiredNodesVersion, is(equalTo(desiredNodesVersion))); - - if (isUpgradedCluster()) { - assertAllDesiredNodesAreActualized(); - } - } - - private void assertDesiredNodesWithFloatProcessorsAreRejectedInOlderVersions() throws Exception { - if (isOldCluster()) { - addClusterNodesToDesiredNodesWithIntegerProcessors(1); - } else if (isMixedCluster()) { - // Processor ranges or float processors are forbidden during upgrades: 8.2 -> 8.3 clusters - final var responseException = expectThrows( - ResponseException.class, - () -> addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(desiredNodesVersion, ProcessorsPrecision.FLOAT) - ); - final var statusCode = responseException.getResponse().getStatusLine().getStatusCode(); - assertThat(statusCode, is(equalTo(400))); - } else { - assertAllDesiredNodesAreActualized(); - addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(4, ProcessorsPrecision.FLOAT); - } - - getLatestDesiredNodes(); - } - private Map getLatestDesiredNodes() throws IOException { final var getDesiredNodesRequest = new Request("GET", "/_internal/desired_nodes/_latest"); final var response = client().performRequest(getDesiredNodesRequest); @@ -143,15 +74,14 @@ private void assertAllDesiredNodesAreActualized() throws Exception { } } - private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int version, ProcessorsPrecision processorsPrecision) - throws Exception { + private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int version) throws Exception { final List nodes; if (randomBoolean()) { nodes = getNodeNames().stream() .map( nodeName -> new DesiredNode( Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), - processorsPrecision == ProcessorsPrecision.DOUBLE ? randomDoubleProcessorCount() : 0.5f, + randomDoubleProcessorCount(), ByteSizeValue.ofGb(randomIntBetween(10, 24)), ByteSizeValue.ofGb(randomIntBetween(128, 256)), clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() @@ -160,9 +90,7 @@ private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int ve .toList(); } else { nodes = getNodeNames().stream().map(nodeName -> { - double minProcessors = processorsPrecision == ProcessorsPrecision.DOUBLE - ? 
randomDoubleProcessorCount() - : randomFloatProcessorCount(); + double minProcessors = randomDoubleProcessorCount(); return new DesiredNode( Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), new DesiredNode.ProcessorsRange(minProcessors, minProcessors + randomIntBetween(10, 20)), @@ -175,21 +103,6 @@ private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int ve updateDesiredNodes(nodes, version); } - private void addClusterNodesToDesiredNodesWithIntegerProcessors(int version) throws Exception { - final var nodes = getNodeNames().stream() - .map( - nodeName -> new DesiredNode( - Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), - randomIntBetween(1, 24), - ByteSizeValue.ofGb(randomIntBetween(10, 24)), - ByteSizeValue.ofGb(randomIntBetween(128, 256)), - clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() - ) - ) - .toList(); - updateDesiredNodes(nodes, version); - } - private void updateDesiredNodes(List nodes, int version) throws IOException { final var request = new Request("PUT", "/_internal/desired_nodes/upgrade_test/" + version); try (var builder = JsonXContent.contentBuilder()) { @@ -226,10 +139,6 @@ private double randomDoubleProcessorCount() { return randomDoubleBetween(0.5, 512.1234, true); } - private float randomFloatProcessorCount() { - return randomIntBetween(1, 512) + randomFloat(); - } - @SuppressWarnings("unchecked") private static T extractValue(Map map, String path) { return (T) XContentMapValues.extractValue(path, map); diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DownsampleIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DownsampleIT.java index 70658da70eb80..bca0c26ad2c32 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DownsampleIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DownsampleIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.junit.Before; import java.io.IOException; @@ -244,10 +243,6 @@ private String getRollupIndexName() throws IOException { } public void testRollupIndex() throws Exception { - assumeTrue( - "Downsample got many stability improvements in 8.10.0", - oldClusterHasFeature(RestTestLegacyFeatures.TSDB_DOWNSAMPLING_STABLE) - ); if (isOldCluster()) { createIlmPolicy(); createIndex(); diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/HealthNodeUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/HealthNodeUpgradeIT.java deleted file mode 100644 index 2ed1b7fe9e79b..0000000000000 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/HealthNodeUpgradeIT.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ - -package org.elasticsearch.upgrades; - -import com.carrotsearch.randomizedtesting.annotations.Name; - -import org.apache.http.util.EntityUtils; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.hamcrest.Matchers; - -import java.nio.charset.StandardCharsets; -import java.util.Map; - -import static org.hamcrest.CoreMatchers.equalTo; - -public class HealthNodeUpgradeIT extends AbstractRollingUpgradeTestCase { - - public HealthNodeUpgradeIT(@Name("upgradedNodes") int upgradedNodes) { - super(upgradedNodes); - } - - public void testHealthNode() throws Exception { - if (clusterHasFeature("health.supports_health")) { - assertBusy(() -> { - Response response = client().performRequest(new Request("GET", "_cat/tasks")); - String tasks = EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8); - assertThat(tasks, Matchers.containsString("health-node")); - }); - assertBusy(() -> { - String path = clusterHasFeature("health.supports_health_report_api") ? "_health_report" : "_internal/_health"; - Response response = client().performRequest(new Request("GET", path)); - Map health_report = entityAsMap(response.getEntity()); - assertThat(health_report.get("status"), equalTo("green")); - }); - } - } -} diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IndexingIT.java index 65bf62783fd69..86a0151e33119 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IndexingIT.java @@ -18,10 +18,11 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.test.ListMatcher; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; @@ -258,7 +259,6 @@ private void bulk(String index, String valueSuffix, int count) throws IOExceptio public void testTsdb() throws IOException { final Version oldClusterVersion = Version.fromString(getOldClusterVersion()); - assumeTrue("indexing time series indices changed in 8.2.0", oldClusterHasFeature(RestTestLegacyFeatures.TSDB_NEW_INDEX_FORMAT)); StringBuilder bulk = new StringBuilder(); if (isOldCluster()) { @@ -385,6 +385,7 @@ private void tsdbBulk(StringBuilder bulk, String dim, long timeStart, long timeE private void assertTsdbAgg(final Version oldClusterVersion, final List expectedTsids, final Matcher... 
expected) throws IOException { + @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) boolean onOrAfterTsidHashingVersion = oldClusterVersion.onOrAfter(Version.V_8_13_0); Request request = new Request("POST", "/tsdb/_search"); request.addParameter("size", "0"); @@ -414,14 +415,18 @@ private void assertTsdbAgg(final Version oldClusterVersion, final List e } public void testSyntheticSource() throws IOException { - assumeTrue("added in 8.4.0", oldClusterHasFeature(RestTestLegacyFeatures.SYNTHETIC_SOURCE_SUPPORTED)); - if (isOldCluster()) { Request createIndex = new Request("PUT", "/synthetic"); XContentBuilder indexSpec = XContentBuilder.builder(XContentType.JSON.xContent()).startObject(); + boolean useIndexSetting = SourceFieldMapper.onOrAfterDeprecateModeVersion(getOldClusterIndexVersion()); + if (useIndexSetting) { + indexSpec.startObject("settings").field("index.mapping.source.mode", "synthetic").endObject(); + } indexSpec.startObject("mappings"); { - indexSpec.startObject("_source").field("mode", "synthetic").endObject(); + if (useIndexSetting == false) { + indexSpec.startObject("_source").field("mode", "synthetic").endObject(); + } indexSpec.startObject("properties").startObject("kwd").field("type", "keyword").endObject().endObject(); } indexSpec.endObject(); diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeRollingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeRollingUpgradeIT.java index 8c369ebc9950d..1eb7cbd3f70c2 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeRollingUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/LogsIndexModeRollingUpgradeIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.FormatNames; -import org.elasticsearch.test.MapMatcher; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.hamcrest.Matcher; @@ -30,9 +29,6 @@ import java.util.Map; import java.util.function.Supplier; -import static org.elasticsearch.test.MapMatcher.assertMap; -import static org.elasticsearch.test.MapMatcher.matchesMap; - public class LogsIndexModeRollingUpgradeIT extends AbstractRollingUpgradeTestCase { @ClassRule() @@ -160,14 +156,10 @@ public void testLogsIndexing() throws IOException { assertOK(bulkIndexResponse); assertThat(entityAsMap(bulkIndexResponse).get("errors"), Matchers.is(false)); - assertIndexMappingsAndSettings(0, Matchers.nullValue(), matchesMap().extraOk()); - assertIndexMappingsAndSettings(1, Matchers.nullValue(), matchesMap().extraOk()); - assertIndexMappingsAndSettings(2, Matchers.nullValue(), matchesMap().extraOk()); - assertIndexMappingsAndSettings( - 3, - Matchers.equalTo("logsdb"), - matchesMap().extraOk().entry("_source", Map.of("mode", "synthetic")) - ); + assertIndexSettings(0, Matchers.nullValue()); + assertIndexSettings(1, Matchers.nullValue()); + assertIndexSettings(2, Matchers.nullValue()); + assertIndexSettings(3, Matchers.equalTo("logsdb")); } } @@ -183,13 +175,11 @@ static void enableLogsdbByDefault() throws IOException { assertOK(client().performRequest(request)); } - private void assertIndexMappingsAndSettings(int backingIndex, final Matcher indexModeMatcher, final MapMatcher mappingsMatcher) - throws IOException { + private void assertIndexSettings(int backingIndex, 
final Matcher indexModeMatcher) throws IOException { assertThat( getSettings(client(), getWriteBackingIndex(client(), "logs-apache-production", backingIndex)).get("index.mode"), indexModeMatcher ); - assertMap(getIndexMappingAsMap(getWriteBackingIndex(client(), "logs-apache-production", backingIndex)), mappingsMatcher); } private static Request createDataStream(final String dataStreamName) { diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java index 3343a683bbd11..9217852f1867c 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java @@ -24,7 +24,6 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -50,12 +49,6 @@ public SnapshotBasedRecoveryIT(@Name("upgradedNodes") int upgradedNodes) { } public void testSnapshotBasedRecovery() throws Exception { - assumeTrue( - "Cancel shard allocation command is broken for initial versions of the desired_balance allocator", - oldClusterHasFeature(RestTestLegacyFeatures.DESIRED_BALANCED_ALLOCATOR_SUPPORTED) == false - || oldClusterHasFeature(RestTestLegacyFeatures.DESIRED_BALANCED_ALLOCATOR_FIXED) - ); - final String indexName = "snapshot_based_recovery"; final String repositoryName = "snapshot_based_recovery_repo"; final int numDocs = 200; diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java index 6744c84f29d0f..46b39128c3a31 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.FormatNames; import org.elasticsearch.test.rest.ObjectPath; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; import java.io.IOException; import java.time.Instant; @@ -24,8 +23,6 @@ import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; public class TsdbIT extends AbstractRollingUpgradeTestCase { @@ -131,7 +128,6 @@ public TsdbIT(@Name("upgradedNodes") int upgradedNodes) { """; public void testTsdbDataStream() throws Exception { - assumeTrue("TSDB was GA-ed in 8.7.0", oldClusterHasFeature(RestTestLegacyFeatures.TSDB_GENERALLY_AVAILABLE)); String dataStreamName = "k8s"; if (isOldCluster()) { final String INDEX_TEMPLATE = """ @@ -155,70 +151,6 @@ public void testTsdbDataStream() throws Exception { } } - public void testTsdbDataStreamWithComponentTemplate() throws Exception { - assumeTrue( - "TSDB was GA-ed in 8.7.0 and bug was fixed in 8.11.0", - oldClusterHasFeature(RestTestLegacyFeatures.TSDB_GENERALLY_AVAILABLE) - && (oldClusterHasFeature(RestTestLegacyFeatures.TSDB_EMPTY_TEMPLATE_FIXED) == false) - ); - String 
dataStreamName = "template-with-component-template"; - if (isOldCluster()) { - final String COMPONENT_TEMPLATE = """ - { - "template": $TEMPLATE - } - """; - var putComponentTemplate = new Request("POST", "/_component_template/1"); - String template = TEMPLATE.replace("\"time_series\"", "\"time_series\", \"routing_path\": [\"k8s.pod.uid\"]"); - putComponentTemplate.setJsonEntity(COMPONENT_TEMPLATE.replace("$TEMPLATE", template)); - assertOK(client().performRequest(putComponentTemplate)); - final String INDEX_TEMPLATE = """ - { - "index_patterns": ["$PATTERN"], - "composed_of": ["1"], - "data_stream": { - } - }"""; - // Add composable index template - String templateName = "2"; - var putIndexTemplateRequest = new Request("POST", "/_index_template/" + templateName); - putIndexTemplateRequest.setJsonEntity(INDEX_TEMPLATE.replace("$PATTERN", dataStreamName)); - assertOK(client().performRequest(putIndexTemplateRequest)); - - performOldClustertOperations(templateName, dataStreamName); - } else if (isMixedCluster()) { - performMixedClusterOperations(dataStreamName); - } else if (isUpgradedCluster()) { - performUpgradedClusterOperations(dataStreamName); - - var dataStreams = getDataStream(dataStreamName); - assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.name"), equalTo(dataStreamName)); - assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.generation"), equalTo(2)); - String firstBackingIndex = ObjectPath.evaluate(dataStreams, "data_streams.0.indices.0.index_name"); - { - var indices = getIndex(firstBackingIndex); - var escapedBackingIndex = firstBackingIndex.replace(".", "\\."); - assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".data_stream"), equalTo(dataStreamName)); - assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.mode"), nullValue()); - String startTime = ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.time_series.start_time"); - assertThat(startTime, nullValue()); - String endTime = ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.time_series.end_time"); - assertThat(endTime, nullValue()); - } - String secondBackingIndex = ObjectPath.evaluate(dataStreams, "data_streams.0.indices.1.index_name"); - { - var indices = getIndex(secondBackingIndex); - var escapedBackingIndex = secondBackingIndex.replace(".", "\\."); - assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".data_stream"), equalTo(dataStreamName)); - assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.mode"), equalTo("time_series")); - String startTime = ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.time_series.start_time"); - assertThat(startTime, notNullValue()); - String endTime = ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.time_series.end_time"); - assertThat(endTime, notNullValue()); - } - } - } - private void performUpgradedClusterOperations(String dataStreamName) throws Exception { ensureGreen(dataStreamName); var rolloverRequest = new Request("POST", "/" + dataStreamName + "/_rollover"); diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java index f3a322b54039a..b2298c12b7b98 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java +++ 
b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java @@ -17,13 +17,11 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; import java.io.IOException; import java.util.Map; import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; -import static org.hamcrest.Matchers.is; public class UpgradeWithOldIndexSettingsIT extends AbstractRollingUpgradeTestCase { @@ -35,33 +33,22 @@ public UpgradeWithOldIndexSettingsIT(@Name("upgradedNodes") int upgradedNodes) { private static final String EXPECTED_WARNING = "[index.indexing.slowlog.level] setting was deprecated in Elasticsearch and will " + "be removed in a future release! See the breaking changes documentation for the next major version."; - private static final String EXPECTED_V8_WARNING = "[index.indexing.slowlog.level] setting was deprecated in the previous Elasticsearch" - + " release and is removed in this release."; - public void testOldIndexSettings() throws Exception { if (isOldCluster()) { Request createTestIndex = new Request("PUT", "/" + INDEX_NAME); createTestIndex.setJsonEntity("{\"settings\": {\"index.indexing.slowlog.level\": \"WARN\"}}"); createTestIndex.setOptions(expectWarnings(EXPECTED_WARNING)); - if (oldClusterHasFeature(RestTestLegacyFeatures.INDEXING_SLOWLOG_LEVEL_SETTING_REMOVED)) { - assertTrue( - expectThrows(ResponseException.class, () -> client().performRequest(createTestIndex)).getMessage() - .contains("unknown setting [index.indexing.slowlog.level]") - ); + assertTrue( + expectThrows(ResponseException.class, () -> client().performRequest(createTestIndex)).getMessage() + .contains("unknown setting [index.indexing.slowlog.level]") + ); - Request createTestIndex1 = new Request("PUT", "/" + INDEX_NAME); - client().performRequest(createTestIndex1); - } else { - // create index with settings no longer valid in 8.0 - client().performRequest(createTestIndex); - } + Request createTestIndex1 = new Request("PUT", "/" + INDEX_NAME); + client().performRequest(createTestIndex1); // add some data Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - if (oldClusterHasFeature(RestTestLegacyFeatures.INDEXING_SLOWLOG_LEVEL_SETTING_REMOVED) == false) { - bulk.setOptions(expectWarnings(EXPECTED_WARNING)); - } bulk.setJsonEntity(Strings.format(""" {"index": {"_index": "%s"}} {"f1": "v1", "f2": "v2"} @@ -71,34 +58,12 @@ public void testOldIndexSettings() throws Exception { // add some more data Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - if (oldClusterHasFeature(RestTestLegacyFeatures.INDEXING_SLOWLOG_LEVEL_SETTING_REMOVED) == false) { - bulk.setOptions(expectWarnings(EXPECTED_WARNING)); - } bulk.setJsonEntity(Strings.format(""" {"index": {"_index": "%s"}} {"f1": "v3", "f2": "v4"} """, INDEX_NAME)); client().performRequest(bulk); } else { - if (oldClusterHasFeature(RestTestLegacyFeatures.INDEXING_SLOWLOG_LEVEL_SETTING_REMOVED) == false) { - Request createTestIndex = new Request("PUT", "/" + INDEX_NAME + "/_settings"); - // update index settings should work - createTestIndex.setJsonEntity("{\"index.indexing.slowlog.level\": \"INFO\"}"); - createTestIndex.setOptions(expectWarnings(EXPECTED_V8_WARNING)); - client().performRequest(createTestIndex); - - // ensure we were able to change the setting, despite it having no 
effect - Request indexSettingsRequest = new Request("GET", "/" + INDEX_NAME + "/_settings"); - Map<String, Object> response = entityAsMap(client().performRequest(indexSettingsRequest)); - - var slowLogLevel = (String) (XContentMapValues.extractValue( - INDEX_NAME + ".settings.index.indexing.slowlog.level", - response - )); - - // check that we can read our old index settings - assertThat(slowLogLevel, is("INFO")); - } assertCount(INDEX_NAME, 2); } } @@ -118,16 +83,6 @@ private void assertCount(String index, int countAtLeast) throws IOException { public static void updateIndexSettingsPermittingSlowlogDeprecationWarning(String index, Settings.Builder settings) throws IOException { Request request = new Request("PUT", "/" + index + "/_settings"); request.setJsonEntity(org.elasticsearch.common.Strings.toString(settings.build())); - if (oldClusterHasFeature(RestTestLegacyFeatures.DEPRECATION_WARNINGS_LEAK_FIXED) == false) { - // There is a bug (fixed in 7.17.9 and 8.7.0 where deprecation warnings could leak into ClusterApplierService#applyChanges) - // Below warnings are set (and leaking) from an index in this test case - request.setOptions(expectVersionSpecificWarnings(v -> { - v.compatible( - "[index.indexing.slowlog.level] setting was deprecated in Elasticsearch and will be removed in a future release! " - + "See the breaking changes documentation for the next major version." - ); - })); - } client().performRequest(request); } } diff --git a/qa/smoke-test-plugins/build.gradle b/qa/smoke-test-plugins/build.gradle index af4e55a709a64..c707c2b5e8c80 100644 --- a/qa/smoke-test-plugins/build.gradle +++ b/qa/smoke-test-plugins/build.gradle @@ -8,7 +8,6 @@ */ import org.apache.tools.ant.filters.ReplaceTokens -import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.legacy-yaml-rest-test' diff --git a/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java b/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java index 6e15e40efa69a..46c6d1b9228d6 100644 --- a/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java +++ b/qa/system-indices/src/main/java/org/elasticsearch/system/indices/SystemIndicesQA.java @@ -10,6 +10,7 @@ package org.elasticsearch.system.indices; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.internal.node.NodeClient; @@ -177,12 +178,12 @@ public List<Route> routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + var content = request.requiredContent(); IndexRequest indexRequest = new IndexRequest(".net-new-system-index-primary"); - indexRequest.source(request.requiredContent(), request.getXContentType()); + indexRequest.source(content, request.getXContentType()); indexRequest.id(request.param("id")); indexRequest.setRefreshPolicy(request.param("refresh")); - - return channel -> client.index(indexRequest, new RestToXContentListener<>(channel)); + return channel -> client.index(indexRequest, ActionListener.withRef(new RestToXContentListener<>(channel), content)); } @Override diff --git a/qa/verify-version-constants/build.gradle b/qa/verify-version-constants/build.gradle index f74ee7c59b26b..67fc962e087cb 100644 --- a/qa/verify-version-constants/build.gradle +++ b/qa/verify-version-constants/build.gradle @@ -8,7 +8,6 @@ */ import
org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-testclusters' @@ -19,7 +18,7 @@ dependencies { testImplementation project(':modules:rest-root') } -BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> +buildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> def baseCluster = testClusters.register(baseName) { version = bwcVersion.toString() setting 'xpack.security.enabled', 'true' diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 8e1df37804708..650d17e41de7f 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -59,4 +59,11 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.replaceValueInMatch("profile.shards.0.dfs.knn.0.query.0.description", "DocAndScoreQuery[0,...][0.009673266,...],0.009673266", "dfs knn vector profiling with vector_operations_count") task.skipTest("cat.aliases/10_basic/Deprecated local parameter", "CAT APIs not covered by compatibility policy") task.skipTest("cat.shards/10_basic/Help", "sync_id is removed in 9.0") + task.skipTest("search/500_date_range/from, to, include_lower, include_upper deprecated", "deprecated parameters are removed in 9.0") + task.skipTest("tsdb/20_mapping/stored source is supported", "no longer serialize source_mode") + task.skipTest("tsdb/20_mapping/Synthetic source", "no longer serialize source_mode") + task.skipTest("logsdb/10_settings/create logs index", "no longer serialize source_mode") + task.skipTest("logsdb/20_source_mapping/stored _source mode is supported", "no longer serialize source_mode") + task.skipTest("logsdb/20_source_mapping/include/exclude is supported with stored _source", "no longer serialize source_mode") + task.skipTest("logsdb/20_source_mapping/synthetic _source is default", "no longer serialize source_mode") }) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/async_search.submit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/async_search.submit.json index 5cd2b0e26459e..3de0dec85f547 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/async_search.submit.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/async_search.submit.json @@ -43,11 +43,6 @@ "description":"Control whether the response should be stored in the cluster if it completed within the provided [wait_for_completion] time (default: false)", "default":false }, - "keep_alive": { - "type": "time", - "description": "Update the time interval in which the results (partial or final) for this search will be available", - "default": "5d" - }, "batched_reduce_size":{ "type":"number", "description":"The number of shard results that should be reduced at once on the coordinating node. This value should be used as the granularity at which progress results will be made available.", @@ -65,6 +60,11 @@ "type":"boolean", "description":"Specify whether wildcard and prefix queries should be analyzed (default: false)" }, + "ccs_minimize_roundtrips":{ + "type":"boolean", + "default":false, + "description":"When doing a cross-cluster search, setting it to true may improve overall search latency, particularly when searching clusters with a large number of shards. However, when set to true, the progress of searches on the remote clusters will not be received until the search finishes on all clusters." 
+ }, "default_operator":{ "type":"enum", "options":[ @@ -126,6 +126,11 @@ "type":"string", "description":"Specify the node or shard the operation should be performed on (default: random)" }, + "rest_total_hits_as_int":{ + "type":"boolean", + "description":"Indicates whether hits.total should be rendered as an integer or an object in the rest search response", + "default":false + }, "q":{ "type":"string", "description":"Query in the Lucene query string syntax" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.delete.json index 745136848786c..cb4eee007a246 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.delete.json @@ -4,7 +4,7 @@ "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-inference-api.html", "description": "Delete an inference endpoint" }, - "stability": "experimental", + "stability": "stable", "visibility": "public", "headers": { "accept": [ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.get.json index 7b7aa0f56fcbc..14e7519c3796e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.get.json @@ -4,7 +4,7 @@ "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/get-inference-api.html", "description":"Get an inference endpoint" }, - "stability":"experimental", + "stability":"stable", "visibility":"public", "headers":{ "accept": [ "application/json"] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.inference.json b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.inference.json index 3195476ce1e9e..eb4c1268c28ca 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.inference.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.inference.json @@ -4,7 +4,7 @@ "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html", "description":"Perform inference" }, - "stability":"experimental", + "stability":"stable", "visibility":"public", "headers":{ "accept": [ "application/json"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.put.json b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.put.json index 9ff5ff4b80c58..411392fe39908 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.put.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.put.json @@ -4,7 +4,7 @@ "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/put-inference-api.html", "description":"Configure an inference endpoint for use in the Inference API" }, - "stability":"experimental", + "stability":"stable", "visibility":"public", "headers":{ "accept": [ "application/json"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.stream_inference.json b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.stream_inference.json index 32b4b2f311837..493306e10d5c7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.stream_inference.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.stream_inference.json @@ -4,7 +4,7 @@ "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/post-stream-inference-api.html", 
"description":"Perform streaming inference" }, - "stability":"experimental", + "stability":"stable", "visibility":"public", "headers":{ "accept": [ "text/event-stream"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/open_point_in_time.json b/rest-api-spec/src/main/resources/rest-api-spec/api/open_point_in_time.json index bce8dfd794dca..6f3d09c15c081 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/open_point_in_time.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/open_point_in_time.json @@ -55,6 +55,10 @@ "type": "string", "description": "Specific the time to live for the point in time", "required": true + }, + "allow_partial_search_results": { + "type": "boolean", + "description": "Specify whether to tolerate shards missing when creating the point-in-time, or otherwise throw an exception. (default: false)" } }, "body":{ diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/40_source_modes.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/40_source_modes.yml new file mode 100644 index 0000000000000..64bbad7fb1c6d --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/40_source_modes.yml @@ -0,0 +1,50 @@ +--- +test source modes: + - requires: + cluster_features: ["cluster.stats.source_modes"] + reason: requires source modes features + + - do: + indices.create: + index: test-synthetic + body: + settings: + index: + mapping: + source.mode: synthetic + + - do: + indices.create: + index: test-stored + + - do: + indices.create: + index: test-disabled + body: + settings: + index: + mapping: + source.mode: disabled + + - do: + bulk: + refresh: true + body: + - '{ "create": { "_index": "test-synthetic" } }' + - '{ "name": "aaaa", "some_string": "AaAa", "some_int": 1000, "some_double": 123.456789, "some_bool": true }' + - '{ "create": { "_index": "test-stored" } }' + - '{ "name": "bbbb", "some_string": "BbBb", "some_int": 2000, "some_double": 321.987654, "some_bool": false }' + - '{ "create": { "_index": "test-disabled" } }' + - '{ "name": "cccc", "some_string": "CcCc", "some_int": 3000, "some_double": 421.484654, "some_bool": false }' + + - do: + search: + index: test-* + - match: { hits.total.value: 3 } + + - do: + cluster.stats: { } + + - match: { indices.mappings.source_modes.disabled: 1 } + - match: { indices.mappings.source_modes.stored: 1 } + - match: { indices.mappings.source_modes.synthetic: 1 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.stats/15_open_closed_state.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.stats/15_open_closed_state.yml new file mode 100644 index 0000000000000..94b6a3acc83a8 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.stats/15_open_closed_state.yml @@ -0,0 +1,22 @@ +--- +"Ensure index state is exposed": + - requires: + cluster_features: ["gte_v8.1.0"] + reason: index state added to stats in 8.1.0 + + - do: + indices.create: + index: openindex + - do: + indices.create: + index: closedindex + - do: + indices.close: + index: closedindex + - do: + indices.stats: + expand_wildcards: [open,closed] + forbid_closed_indices: false + + - match: { indices.openindex.status: open } + - match: { indices.closedindex.status: close } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml index 
d0f89b1b8b6cb..463df7d2ab1bb 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml @@ -76,11 +76,6 @@ create logs index: - is_true: test - match: { test.settings.index.mode: "logsdb" } - - do: - indices.get_mapping: - index: test - - match: { test.mappings._source.mode: synthetic } - --- using default timestamp field mapping: - requires: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml index 27146557bb1be..06a007b8aaca5 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/20_source_mapping.yml @@ -13,10 +13,10 @@ synthetic _source is default: index: mode: logsdb - do: - indices.get: + indices.get_settings: index: test-default-source - - - match: { test-default-source.mappings._source.mode: "synthetic" } + - match: { test-default-source.settings.index.mode: logsdb } + - match: { test-default-source.settings.index.mapping.source.mode: null } --- stored _source mode is supported: @@ -28,11 +28,12 @@ stored _source mode is supported: index: mode: logsdb mapping.source.mode: stored + - do: - indices.get: + indices.get_settings: index: test-stored-source - - - match: { test-stored-source.mappings._source.mode: "stored" } + - match: { test-stored-source.settings.index.mode: logsdb } + - match: { test-stored-source.settings.index.mapping.source.mode: stored } --- disabled _source is not supported: @@ -110,7 +111,6 @@ include/exclude is supported with stored _source: indices.get: index: test-includes - - match: { test-includes.mappings._source.mode: "stored" } - match: { test-includes.mappings._source.includes: ["a"] } - do: @@ -129,5 +129,4 @@ include/exclude is supported with stored _source: indices.get: index: test-excludes - - match: { test-excludes.mappings._source.mode: "stored" } - match: { test-excludes.mappings._source.excludes: ["b"] } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/500_date_range.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/500_date_range.yml index e9bfffb8da604..76057b5a364fb 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/500_date_range.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/500_date_range.yml @@ -123,29 +123,3 @@ setup: - match: { hits.total: 1 } - length: { hits.hits: 1 } - match: { hits.hits.0._id: "4" } - - ---- -"from, to, include_lower, include_upper deprecated": - - requires: - cluster_features: "gte_v8.16.0" - reason: 'from, to, include_lower, include_upper parameters are deprecated since 8.16.0' - test_runner_features: warnings - - - do: - warnings: - - "Deprecated field [from] used, this field is unused and will be removed entirely" - - "Deprecated field [to] used, this field is unused and will be removed entirely" - - "Deprecated field [include_lower] used, this field is unused and will be removed entirely" - - "Deprecated field [include_upper] used, this field is unused and will be removed entirely" - search: - index: dates - body: - sort: field - query: - range: - date: - from: 1000 - to: 2023 - include_lower: false - include_upper: false diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/10_synonyms_put.yml 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/10_synonyms_put.yml index 675b98133ce11..93f1fafa7ab85 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/10_synonyms_put.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/10_synonyms_put.yml @@ -17,7 +17,10 @@ setup: - do: cluster.health: - wait_for_no_initializing_shards: true + index: .synonyms-2 + timeout: 2s + wait_for_status: green + ignore: 408 - do: synonyms.get_synonym: @@ -64,7 +67,10 @@ setup: - do: cluster.health: - wait_for_no_initializing_shards: true + index: .synonyms-2 + timeout: 2s + wait_for_status: green + ignore: 408 - do: synonyms.get_synonym: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml index 4e77e10495109..7f545b466e65f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml @@ -14,7 +14,10 @@ setup: # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. - do: cluster.health: - wait_for_no_initializing_shards: true + index: .synonyms-2 + timeout: 2s + wait_for_status: green + ignore: 408 - do: indices.create: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml index 5e6d4ec2341ad..9e6af0f471e6e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml @@ -17,7 +17,10 @@ setup: # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. - do: cluster.health: - wait_for_no_initializing_shards: true + index: .synonyms-2 + timeout: 2s + wait_for_status: green + ignore: 408 --- "Get synonyms set": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/30_synonyms_delete.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/30_synonyms_delete.yml index 23c907f6a1137..62e8fe333ce99 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/30_synonyms_delete.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/30_synonyms_delete.yml @@ -15,7 +15,11 @@ setup: # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. - do: cluster.health: - wait_for_no_initializing_shards: true + index: .synonyms-2 + timeout: 2s + wait_for_status: green + ignore: 408 + --- "Delete synonyms set": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/40_synonyms_sets_get.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/40_synonyms_sets_get.yml index 7c145dafd81cd..3815ea2c96c97 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/40_synonyms_sets_get.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/40_synonyms_sets_get.yml @@ -13,7 +13,10 @@ setup: # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. 
- do: cluster.health: - wait_for_no_initializing_shards: true + index: .synonyms-2 + timeout: 2s + wait_for_status: green + ignore: 408 - do: synonyms.put_synonym: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/50_synonym_rule_put.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/50_synonym_rule_put.yml index d8611000fe465..02757f711f690 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/50_synonym_rule_put.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/50_synonym_rule_put.yml @@ -17,7 +17,11 @@ setup: # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. - do: cluster.health: - wait_for_no_initializing_shards: true + index: .synonyms-2 + timeout: 2s + wait_for_status: green + ignore: 408 + --- "Update a synonyms rule": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/60_synonym_rule_get.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/60_synonym_rule_get.yml index 0c962b51e08cb..9f1aa1d254169 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/60_synonym_rule_get.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/60_synonym_rule_get.yml @@ -17,8 +17,10 @@ setup: # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. - do: cluster.health: - wait_for_no_initializing_shards: true - + index: .synonyms-2 + timeout: 2s + wait_for_status: green + ignore: 408 --- "Get a synonym rule": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/70_synonym_rule_delete.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/70_synonym_rule_delete.yml index 41ab293158a35..d2c706decf4fd 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/70_synonym_rule_delete.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/70_synonym_rule_delete.yml @@ -17,7 +17,10 @@ setup: # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. - do: cluster.health: - wait_for_no_initializing_shards: true + index: .synonyms-2 + timeout: 2s + wait_for_status: green + ignore: 408 --- "Delete synonym rule": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml index 3aba0f0b4b78b..965cae551fab2 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml @@ -16,7 +16,10 @@ setup: # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. 
- do: cluster.health: - wait_for_no_initializing_shards: true + index: .synonyms-2 + timeout: 2s + wait_for_status: green + ignore: 408 # Create an index with synonym_filter that uses that synonyms set - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml index ac01f2dc0178a..d6c98673253fb 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml @@ -28,7 +28,10 @@ # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. - do: cluster.health: - wait_for_no_initializing_shards: true + index: .synonyms-2 + timeout: 2s + wait_for_status: green + ignore: 408 # Create my_index1 with synonym_filter that uses synonyms_set1 - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml index 4d8f03a6e5e18..9fe3f5e0b7272 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml @@ -450,11 +450,6 @@ nested fields: type: long time_series_metric: gauge - - do: - indices.get_mapping: {} - - - match: {tsdb-synthetic.mappings._source.mode: synthetic} - --- stored source is supported: - requires: @@ -486,12 +481,6 @@ stored source is supported: type: keyword time_series_dimension: true - - do: - indices.get: - index: tsdb_index - - - match: { tsdb_index.mappings._source.mode: "stored" } - --- disabled source is not supported: - requires: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml index db718959919da..54b2bf59c8ddc 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml @@ -129,7 +129,7 @@ noop update: {} --- -update: +regular update: - requires: cluster_features: ["gte_v8.2.0"] reason: tsdb indexing changed in 8.2.0 diff --git a/server/build.gradle b/server/build.gradle index ef64b0746dfc4..0bd807751ecbb 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -7,8 +7,6 @@ * License v3.0 only", or the "Server Side Public License, v 1". 
*/ -import org.elasticsearch.gradle.internal.info.BuildParams - apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.publish' apply plugin: 'elasticsearch.internal-cluster-test' @@ -133,7 +131,7 @@ def generatePluginsList = tasks.register("generatePluginsList") { sourceSets.main.output.dir(generatedResourcesDir) sourceSets.main.compiledBy(generateModulesList, generatePluginsList) -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' systemProperty 'es.failure_store_feature_flag_enabled', 'true' diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/ListenerActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/ListenerActionIT.java deleted file mode 100644 index 8b5e014b519c8..0000000000000 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/ListenerActionIT.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.action; - -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.client.internal.Client; -import org.elasticsearch.client.internal.Requests; -import org.elasticsearch.test.ESIntegTestCase; - -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicReference; - -public class ListenerActionIT extends ESIntegTestCase { - public void testThreadedListeners() throws Throwable { - final CountDownLatch latch = new CountDownLatch(1); - final AtomicReference<Throwable> failure = new AtomicReference<>(); - final AtomicReference<String> threadName = new AtomicReference<>(); - Client client = client(); - - IndexRequest request = new IndexRequest("test").id("1"); - if (randomBoolean()) { - // set the source, without it, we will have a verification failure - request.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1"); - } - - client.index(request, new ActionListener<DocWriteResponse>() { - @Override - public void onResponse(DocWriteResponse indexResponse) { - threadName.set(Thread.currentThread().getName()); - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - threadName.set(Thread.currentThread().getName()); - failure.set(e); - latch.countDown(); - } - }); - - latch.await(); - - assertFalse(threadName.get().contains("listener")); - } -} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java index 8c80cee58f46c..76a6717ab1d09 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestLogging; import org.hamcrest.Matcher; @@ -31,6 +32,7 @@ import static
org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.test.MockLog.assertThatLogger; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.CoreMatchers.equalTo; @@ -211,4 +213,25 @@ public void testLogLocalHotThreads() { ) ); } + + @TestLogging(reason = "testing logging at various levels", value = "org.elasticsearch.action.admin.HotThreadsIT:TRACE") + public void testLogLocalCurrentThreadsInPlainText() { + final var level = randomFrom(Level.TRACE, Level.DEBUG, Level.INFO, Level.WARN, Level.ERROR); + assertThatLogger( + () -> HotThreads.logLocalCurrentThreads(logger, level, getTestName()), + HotThreadsIT.class, + new MockLog.SeenEventExpectation( + "Should log hot threads header in plain text", + HotThreadsIT.class.getCanonicalName(), + level, + "testLogLocalCurrentThreadsInPlainText: Hot threads at" + ), + new MockLog.SeenEventExpectation( + "Should log hot threads cpu usage in plain text", + HotThreadsIT.class.getCanonicalName(), + level, + "cpu usage by thread" + ) + ); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/IncrementalBulkIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/IncrementalBulkIT.java index 4977d87d5a348..deae022795ad2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/IncrementalBulkIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/IncrementalBulkIT.java @@ -65,7 +65,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { .put(super.nodeSettings(nodeOrdinal, otherSettings)) .put(IndexingPressure.SPLIT_BULK_LOW_WATERMARK.getKey(), "512B") .put(IndexingPressure.SPLIT_BULK_LOW_WATERMARK_SIZE.getKey(), "2048B") - .put(IndexingPressure.SPLIT_BULK_HIGH_WATERMARK.getKey(), "2KB") + .put(IndexingPressure.SPLIT_BULK_HIGH_WATERMARK.getKey(), "4KB") .put(IndexingPressure.SPLIT_BULK_HIGH_WATERMARK_SIZE.getKey(), "1024B") .build(); } @@ -161,6 +161,8 @@ public void testIncrementalBulkLowWatermarkBackOff() throws Exception { IndexRequest indexRequest = indexRequest(index); long total = indexRequest.ramBytesUsed(); + long lowWaterMarkSplits = indexingPressure.stats().getLowWaterMarkSplits(); + long highWaterMarkSplits = indexingPressure.stats().getHighWaterMarkSplits(); while (total < 2048) { refCounted.incRef(); handler.addItems(List.of(indexRequest), refCounted::decRef, () -> nextPage.set(true)); @@ -175,6 +177,8 @@ handler.addItems(List.of(indexRequest(index)), refCounted::decRef, () -> nextPage.set(true)); assertBusy(() -> assertThat(indexingPressure.stats().getCurrentCombinedCoordinatingAndPrimaryBytes(), equalTo(0L))); + assertBusy(() -> assertThat(indexingPressure.stats().getLowWaterMarkSplits(), equalTo(lowWaterMarkSplits + 1))); + assertThat(indexingPressure.stats().getHighWaterMarkSplits(), equalTo(highWaterMarkSplits)); PlainActionFuture<BulkResponse> future = new PlainActionFuture<>(); handler.lastItems(List.of(indexRequest), refCounted::decRef, future); @@ -192,6 +196,8 @@ public void testIncrementalBulkHighWatermarkBackOff() throws Exception { IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class,
nodeName); IndexingPressure indexingPressure = internalCluster().getInstance(IndexingPressure.class, nodeName); ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, nodeName); + long lowWaterMarkSplits = indexingPressure.stats().getLowWaterMarkSplits(); + long highWaterMarkSplits = indexingPressure.stats().getHighWaterMarkSplits(); AbstractRefCounted refCounted = AbstractRefCounted.of(() -> {}); AtomicBoolean nextPage = new AtomicBoolean(false); @@ -217,6 +223,8 @@ public void testIncrementalBulkHighWatermarkBackOff() throws Exception { handlerNoThrottle.addItems(requestsNoThrottle, refCounted::decRef, () -> nextPage.set(true)); assertTrue(nextPage.get()); nextPage.set(false); + assertThat(indexingPressure.stats().getHighWaterMarkSplits(), equalTo(highWaterMarkSplits)); + assertThat(indexingPressure.stats().getLowWaterMarkSplits(), equalTo(lowWaterMarkSplits)); ArrayList<DocWriteRequest<?>> requestsThrottle = new ArrayList<>(); // Test that a request larger than SPLIT_BULK_HIGH_WATERMARK_SIZE (1KB) is throttled @@ -235,6 +243,8 @@ // Wait until we are ready for the next page assertBusy(() -> assertTrue(nextPage.get())); + assertBusy(() -> assertThat(indexingPressure.stats().getHighWaterMarkSplits(), equalTo(highWaterMarkSplits + 1))); + assertThat(indexingPressure.stats().getLowWaterMarkSplits(), equalTo(lowWaterMarkSplits)); for (IncrementalBulkService.Handler h : handlers) { refCounted.incRef(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java index a1395f81eb091..67576059de1e0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexSettings; @@ -669,7 +670,7 @@ public Aggregator subAggregator(String aggregatorName) { } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) { return new InternalAggregation[] { buildEmptyAggregation() }; } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java index b70da34c8fe3f..309bf69f00be0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -65,6 +65,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static
org.hamcrest.Matchers.emptyArray; @@ -262,27 +263,16 @@ public void testSearchingFilteringAliasesSingleIndex() throws Exception { .setRefreshPolicy(RefreshPolicy.IMMEDIATE) ).actionGet(); - logger.info("--> checking single filtering alias search"); - assertResponse( + assertResponses( + searchResponse -> assertHits(searchResponse.getHits(), "1"), prepareSearch("foos").setQuery(QueryBuilders.matchAllQuery()), - searchResponse -> assertHits(searchResponse.getHits(), "1") - ); - - logger.info("--> checking single filtering alias wildcard search"); - assertResponse( - prepareSearch("fo*").setQuery(QueryBuilders.matchAllQuery()), - searchResponse -> assertHits(searchResponse.getHits(), "1") + prepareSearch("fo*").setQuery(QueryBuilders.matchAllQuery()) ); - assertResponse( + assertResponses( + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3"), prepareSearch("tests").setQuery(QueryBuilders.matchAllQuery()), - searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3") - ); - - logger.info("--> checking single filtering alias search with sort"); - assertResponse( - prepareSearch("tests").setQuery(QueryBuilders.matchAllQuery()).addSort("_index", SortOrder.ASC), - searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3") + prepareSearch("tests").setQuery(QueryBuilders.matchAllQuery()).addSort("_index", SortOrder.ASC) ); logger.info("--> checking single filtering alias search with global facets"); @@ -323,28 +313,12 @@ public void testSearchingFilteringAliasesSingleIndex() throws Exception { searchResponse -> assertHits(searchResponse.getHits(), "1", "2") ); - logger.info("--> checking single non-filtering alias search"); - assertResponse( + assertResponses( + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3", "4"), prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()), - searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3", "4") - ); - - logger.info("--> checking non-filtering alias and filtering alias search"); - assertResponse( prepareSearch("alias1", "foos").setQuery(QueryBuilders.matchAllQuery()), - searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3", "4") - ); - - logger.info("--> checking index and filtering alias search"); - assertResponse( prepareSearch("test", "foos").setQuery(QueryBuilders.matchAllQuery()), - searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3", "4") - ); - - logger.info("--> checking index and alias wildcard search"); - assertResponse( - prepareSearch("te*", "fo*").setQuery(QueryBuilders.matchAllQuery()), - searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3", "4") + prepareSearch("te*", "fo*").setQuery(QueryBuilders.matchAllQuery()) ); } @@ -506,11 +480,11 @@ public void testSearchingFilteringAliasesMultipleIndices() throws Exception { prepareSearch("filter23", "filter13").setQuery(QueryBuilders.matchAllQuery()), searchResponse -> assertHits(searchResponse.getHits(), "21", "31", "13", "33") ); - assertResponse( + assertResponses( + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value(), equalTo(4L)), prepareSearch("filter23", "filter13").setSize(0).setQuery(QueryBuilders.matchAllQuery()), - searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value(), equalTo(4L)) + prepareSearch("filter13", "filter1").setSize(0).setQuery(QueryBuilders.matchAllQuery()) ); - assertResponse( prepareSearch("filter23", "filter1").setQuery(QueryBuilders.matchAllQuery()), searchResponse -> 
assertHits(searchResponse.getHits(), "21", "31", "11", "12", "13") @@ -519,16 +493,10 @@ public void testSearchingFilteringAliasesMultipleIndices() throws Exception { prepareSearch("filter23", "filter1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value(), equalTo(5L)) ); - assertResponse( prepareSearch("filter13", "filter1").setQuery(QueryBuilders.matchAllQuery()), searchResponse -> assertHits(searchResponse.getHits(), "11", "12", "13", "33") ); - assertResponse( - prepareSearch("filter13", "filter1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), - searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value(), equalTo(4L)) - ); - assertResponse( prepareSearch("filter13", "filter1", "filter23").setQuery(QueryBuilders.matchAllQuery()), searchResponse -> assertHits(searchResponse.getHits(), "11", "12", "13", "21", "31", "33") @@ -537,7 +505,6 @@ public void testSearchingFilteringAliasesMultipleIndices() throws Exception { prepareSearch("filter13", "filter1", "filter23").setSize(0).setQuery(QueryBuilders.matchAllQuery()), searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value(), equalTo(6L)) ); - assertResponse( prepareSearch("filter23", "filter13", "test2").setQuery(QueryBuilders.matchAllQuery()), searchResponse -> assertHits(searchResponse.getHits(), "21", "22", "23", "31", "13", "33") @@ -546,7 +513,6 @@ public void testSearchingFilteringAliasesMultipleIndices() throws Exception { prepareSearch("filter23", "filter13", "test2").setSize(0).setQuery(QueryBuilders.matchAllQuery()), searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value(), equalTo(6L)) ); - assertResponse( prepareSearch("filter23", "filter13", "test1", "test2").setQuery(QueryBuilders.matchAllQuery()), searchResponse -> assertHits(searchResponse.getHits(), "11", "12", "13", "21", "22", "23", "31", "33") @@ -1325,17 +1291,13 @@ public void testIndexingAndQueryingHiddenAliases() throws Exception { searchResponse -> assertHits(searchResponse.getHits(), "2", "3") ); - // Ensure that all docs can be gotten through the alias - assertResponse( + assertResponses( + searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3"), + // Ensure that all docs can be gotten through the alias prepareSearch(alias).setQuery(QueryBuilders.matchAllQuery()), - searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3") - ); - - // And querying using a wildcard with indices options set to expand hidden - assertResponse( + // And querying using a wildcard with indices options set to expand hidden prepareSearch("alias*").setQuery(QueryBuilders.matchAllQuery()) - .setIndicesOptions(IndicesOptions.fromOptions(false, false, true, false, true, true, true, false, false)), - searchResponse -> assertHits(searchResponse.getHits(), "1", "2", "3") + .setIndicesOptions(IndicesOptions.fromOptions(false, false, true, false, true, true, true, false, false)) ); // And that querying the alias with a wildcard and no expand options fails diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java index 9a71bf86388a4..355427c4e059b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.routing.allocation.allocator; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterInfoServiceUtils; import org.elasticsearch.cluster.InternalClusterInfoService; @@ -68,6 +69,7 @@ public void testDesiredBalanceMetrics() { final var infoService = (InternalClusterInfoService) internalCluster().getCurrentMasterNodeInstance(ClusterInfoService.class); ClusterInfoServiceUtils.setUpdateFrequency(infoService, TimeValue.timeValueMillis(200)); assertNotNull("info should not be null", ClusterInfoServiceUtils.refresh(infoService)); + ClusterRerouteUtils.reroute(client()); // ensure we leverage the latest cluster info final var telemetryPlugin = getTelemetryPlugin(internalCluster().getMasterName()); telemetryPlugin.collect(); @@ -115,6 +117,15 @@ public void testDesiredBalanceMetrics() { assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); } + final var currentNodeWeightsMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + DesiredBalanceMetrics.CURRENT_NODE_WEIGHT_METRIC_NAME + ); + assertThat(currentNodeWeightsMetrics.size(), equalTo(2)); + for (var nodeStat : currentNodeWeightsMetrics) { + assertTrue(nodeStat.isDouble()); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } final var currentNodeShardCountMetrics = telemetryPlugin.getLongGaugeMeasurement( DesiredBalanceMetrics.CURRENT_NODE_SHARD_COUNT_METRIC_NAME ); @@ -194,6 +205,7 @@ private static void assertMetricsAreBeingPublished(String nodeName, boolean shou testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.DESIRED_BALANCE_NODE_SHARD_COUNT_METRIC_NAME), matcher ); + assertThat(testTelemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_WEIGHT_METRIC_NAME), matcher); assertThat(testTelemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_WRITE_LOAD_METRIC_NAME), matcher); assertThat(testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_DISK_USAGE_METRIC_NAME), matcher); assertThat(testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_SHARD_COUNT_METRIC_NAME), matcher); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index 0647a24aa39c8..de9e3f28a2109 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -50,6 +50,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsInAnyOrder; import static 
org.hamcrest.Matchers.containsString; @@ -843,24 +844,13 @@ public void testMultipleTemplate() throws IOException { ensureGreen(); - // ax -> matches template - assertResponse( + assertResponses(response -> { + assertHitCount(response, 1); + assertEquals("value1", response.getHits().getAt(0).field("field1").getValue().toString()); + assertNull(response.getHits().getAt(0).field("field2")); + }, prepareSearch("ax").setQuery(termQuery("field1", "value1")).addStoredField("field1").addStoredField("field2"), - response -> { - assertHitCount(response, 1); - assertEquals("value1", response.getHits().getAt(0).field("field1").getValue().toString()); - assertNull(response.getHits().getAt(0).field("field2")); - } - ); - - // bx -> matches template - assertResponse( - prepareSearch("bx").setQuery(termQuery("field1", "value1")).addStoredField("field1").addStoredField("field2"), - response -> { - assertHitCount(response, 1); - assertEquals("value1", response.getHits().getAt(0).field("field1").getValue().toString()); - assertNull(response.getHits().getAt(0).field("field2")); - } + prepareSearch("bx").setQuery(termQuery("field1", "value1")).addStoredField("field1").addStoredField("field2") ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java index c25ce822f8755..81a39dbe1f9f7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java @@ -37,6 +37,7 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.ingest.IngestPipelineTestUtils.jsonSimulatePipelineRequest; import static org.elasticsearch.ingest.IngestPipelineTestUtils.putJsonPipelineRequest; import static org.elasticsearch.test.NodeRoles.nonIngestNode; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @@ -97,7 +98,7 @@ public void testSimulate() throws Exception { if (randomBoolean()) { response = clusterAdmin().prepareSimulatePipeline(bytes, XContentType.JSON).setId("_id").get(); } else { - SimulatePipelineRequest request = new SimulatePipelineRequest(bytes, XContentType.JSON); + SimulatePipelineRequest request = jsonSimulatePipelineRequest(bytes); request.setId("_id"); response = clusterAdmin().simulatePipeline(request).get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java index 9364e7437141e..e4d44212f2854 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java @@ -9,35 +9,48 @@ package org.elasticsearch.monitor.metrics; +import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.bulk.IncrementalBulkService; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import 
org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.telemetry.Measurement; import org.elasticsearch.telemetry.TestTelemetryPlugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; import static org.elasticsearch.index.IndexingPressure.MAX_COORDINATING_BYTES; import static org.elasticsearch.index.IndexingPressure.MAX_PRIMARY_BYTES; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThan; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) public class NodeIndexingMetricsIT extends ESIntegTestCase { @@ -453,6 +466,211 @@ public void testPrimaryDocumentRejectionMetricsFluctuatingOverTime() throws Exce } } + // Borrowed this test from IncrementalBulkIT and added test for metrics to it + public void testIncrementalBulkLowWatermarkSplitMetrics() throws Exception { + final String nodeName = internalCluster().startNode( + Settings.builder() + .put(IndexingPressure.SPLIT_BULK_LOW_WATERMARK.getKey(), "512B") + .put(IndexingPressure.SPLIT_BULK_LOW_WATERMARK_SIZE.getKey(), "2048B") + .put(IndexingPressure.SPLIT_BULK_HIGH_WATERMARK.getKey(), "4KB") + .put(IndexingPressure.SPLIT_BULK_HIGH_WATERMARK_SIZE.getKey(), "1024B") + .build() + ); + ensureStableCluster(1); + + String index = "test"; + createIndex(index); + + IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class, nodeName); + IndexingPressure indexingPressure = internalCluster().getInstance(IndexingPressure.class, nodeName); + final TestTelemetryPlugin testTelemetryPlugin = internalCluster().getInstance(PluginsService.class, nodeName) + .filterPlugins(TestTelemetryPlugin.class) + .findFirst() + .orElseThrow(); + testTelemetryPlugin.resetMeter(); + + IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest(); + + AbstractRefCounted refCounted = AbstractRefCounted.of(() -> {}); + AtomicBoolean nextPage = new AtomicBoolean(false); + + IndexRequest indexRequest = indexRequest(index); + long total = indexRequest.ramBytesUsed(); + while (total < 2048) { + refCounted.incRef(); + handler.addItems(List.of(indexRequest), refCounted::decRef, () -> nextPage.set(true)); + assertTrue(nextPage.get()); + nextPage.set(false); + indexRequest = indexRequest(index); + total += indexRequest.ramBytesUsed(); + } + + assertThat(indexingPressure.stats().getCurrentCombinedCoordinatingAndPrimaryBytes(), greaterThan(0L)); + assertThat(indexingPressure.stats().getLowWaterMarkSplits(), equalTo(0L)); + + 
testTelemetryPlugin.collect(); + assertThat( + getSingleRecordedMetric( + testTelemetryPlugin::getLongAsyncCounterMeasurement, + "es.indexing.coordinating.low_watermark_splits.total" + ).getLong(), + equalTo(0L) + ); + assertThat( + getSingleRecordedMetric( + testTelemetryPlugin::getLongAsyncCounterMeasurement, + "es.indexing.coordinating.high_watermark_splits.total" + ).getLong(), + equalTo(0L) + ); + + refCounted.incRef(); + handler.addItems(List.of(indexRequest(index)), refCounted::decRef, () -> nextPage.set(true)); + + assertBusy(() -> assertThat(indexingPressure.stats().getCurrentCombinedCoordinatingAndPrimaryBytes(), equalTo(0L))); + assertBusy(() -> assertThat(indexingPressure.stats().getLowWaterMarkSplits(), equalTo(1L))); + assertThat(indexingPressure.stats().getHighWaterMarkSplits(), equalTo(0L)); + + testTelemetryPlugin.collect(); + assertThat( + getLatestRecordedMetric( + testTelemetryPlugin::getLongAsyncCounterMeasurement, + "es.indexing.coordinating.low_watermark_splits.total" + ).getLong(), + equalTo(1L) + ); + assertThat( + getLatestRecordedMetric( + testTelemetryPlugin::getLongAsyncCounterMeasurement, + "es.indexing.coordinating.high_watermark_splits.total" + ).getLong(), + equalTo(0L) + ); + + PlainActionFuture<BulkResponse> future = new PlainActionFuture<>(); + handler.lastItems(List.of(indexRequest), refCounted::decRef, future); + + BulkResponse bulkResponse = safeGet(future); + assertNoFailures(bulkResponse); + assertFalse(refCounted.hasReferences()); + } + + // Borrowed this test from IncrementalBulkIT and added test for metrics to it + public void testIncrementalBulkHighWatermarkSplitMetrics() throws Exception { + final String nodeName = internalCluster().startNode( + Settings.builder() + .put(IndexingPressure.SPLIT_BULK_LOW_WATERMARK.getKey(), "512B") + .put(IndexingPressure.SPLIT_BULK_LOW_WATERMARK_SIZE.getKey(), "2048B") + .put(IndexingPressure.SPLIT_BULK_HIGH_WATERMARK.getKey(), "4KB") + .put(IndexingPressure.SPLIT_BULK_HIGH_WATERMARK_SIZE.getKey(), "1024B") + .build() + ); + ensureStableCluster(1); + + String index = "test"; + createIndex(index); + + IncrementalBulkService incrementalBulkService = internalCluster().getInstance(IncrementalBulkService.class, nodeName); + IndexingPressure indexingPressure = internalCluster().getInstance(IndexingPressure.class, nodeName); + ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, nodeName); + final TestTelemetryPlugin testTelemetryPlugin = internalCluster().getInstance(PluginsService.class, nodeName) + .filterPlugins(TestTelemetryPlugin.class) + .findFirst() + .orElseThrow(); + testTelemetryPlugin.resetMeter(); + + AbstractRefCounted refCounted = AbstractRefCounted.of(() -> {}); + AtomicBoolean nextPage = new AtomicBoolean(false); + + ArrayList<IncrementalBulkService.Handler> handlers = new ArrayList<>(); + for (int i = 0; i < 4; ++i) { + ArrayList<DocWriteRequest<?>> requests = new ArrayList<>(); + add512BRequests(requests, index); + IncrementalBulkService.Handler handler = incrementalBulkService.newBulkRequest(); + handlers.add(handler); + refCounted.incRef(); + handler.addItems(requests, refCounted::decRef, () -> nextPage.set(true)); + assertTrue(nextPage.get()); + nextPage.set(false); + } + + // Test that a request smaller than SPLIT_BULK_HIGH_WATERMARK_SIZE (1KB) is not throttled + ArrayList<DocWriteRequest<?>> requestsNoThrottle = new ArrayList<>(); + add512BRequests(requestsNoThrottle, index); + IncrementalBulkService.Handler handlerNoThrottle = incrementalBulkService.newBulkRequest(); + handlers.add(handlerNoThrottle); + refCounted.incRef(); + 
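// This single ~512B page stays below the 1KB high watermark size, so addItems should complete without splitting. + 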
handlerNoThrottle.addItems(requestsNoThrottle, refCounted::decRef, () -> nextPage.set(true)); + assertTrue(nextPage.get()); + nextPage.set(false); + assertThat(indexingPressure.stats().getHighWaterMarkSplits(), equalTo(0L)); + + testTelemetryPlugin.collect(); + assertThat( + getSingleRecordedMetric( + testTelemetryPlugin::getLongAsyncCounterMeasurement, + "es.indexing.coordinating.low_watermark_splits.total" + ).getLong(), + equalTo(0L) + ); + assertThat( + getSingleRecordedMetric( + testTelemetryPlugin::getLongAsyncCounterMeasurement, + "es.indexing.coordinating.high_watermark_splits.total" + ).getLong(), + equalTo(0L) + ); + + ArrayList<DocWriteRequest<?>> requestsThrottle = new ArrayList<>(); + // Test that a request larger than SPLIT_BULK_HIGH_WATERMARK_SIZE (1KB) is throttled + add512BRequests(requestsThrottle, index); + add512BRequests(requestsThrottle, index); + + CountDownLatch finishLatch = new CountDownLatch(1); + blockWritePool(threadPool, finishLatch); + IncrementalBulkService.Handler handlerThrottled = incrementalBulkService.newBulkRequest(); + refCounted.incRef(); + handlerThrottled.addItems(requestsThrottle, refCounted::decRef, () -> nextPage.set(true)); + assertFalse(nextPage.get()); + finishLatch.countDown(); + + handlers.add(handlerThrottled); + + // Wait until we are ready for the next page + assertBusy(() -> assertTrue(nextPage.get())); + assertBusy(() -> assertThat(indexingPressure.stats().getHighWaterMarkSplits(), equalTo(1L))); + assertThat(indexingPressure.stats().getLowWaterMarkSplits(), equalTo(0L)); + + testTelemetryPlugin.collect(); + assertThat( + getLatestRecordedMetric( + testTelemetryPlugin::getLongAsyncCounterMeasurement, + "es.indexing.coordinating.low_watermark_splits.total" + ).getLong(), + equalTo(0L) + ); + assertThat( + getLatestRecordedMetric( + testTelemetryPlugin::getLongAsyncCounterMeasurement, + "es.indexing.coordinating.high_watermark_splits.total" + ).getLong(), + equalTo(1L) + ); + + for (IncrementalBulkService.Handler h : handlers) { + refCounted.incRef(); + PlainActionFuture<BulkResponse> future = new PlainActionFuture<>(); + h.lastItems(List.of(indexRequest(index)), refCounted::decRef, future); + BulkResponse bulkResponse = safeGet(future); + assertNoFailures(bulkResponse); + } + + assertBusy(() -> assertThat(indexingPressure.stats().getCurrentCombinedCoordinatingAndPrimaryBytes(), equalTo(0L))); + refCounted.decRef(); + assertFalse(refCounted.hasReferences()); + testTelemetryPlugin.collect(); + } + private static Measurement getSingleRecordedMetric(Function<String, List<Measurement>> metricGetter, String name) { final List<Measurement> measurements = metricGetter.apply(name); assertFalse("Indexing metric is not recorded", measurements.isEmpty()); @@ -470,4 +688,47 @@ private static boolean doublesEquals(double expected, double actual) { final double eps = .0000001; return Math.abs(expected - actual) < eps; } + + private static IndexRequest indexRequest(String index) { + IndexRequest indexRequest = new IndexRequest(); + indexRequest.index(index); + indexRequest.source(Map.of("field", randomAlphaOfLength(10))); + return indexRequest; + } + + private static void add512BRequests(ArrayList<DocWriteRequest<?>> requests, String index) { + long total = 0; + while (total < 512) { + IndexRequest indexRequest = indexRequest(index); + requests.add(indexRequest); + total += indexRequest.ramBytesUsed(); + } + assertThat(total, lessThan(1024L)); + } + + private static void blockWritePool(ThreadPool threadPool, CountDownLatch finishLatch) { + final var threadCount = threadPool.info(ThreadPool.Names.WRITE).getMax(); + final var startBarrier = 
new CyclicBarrier(threadCount + 1); + final var blockingTask = new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + fail(e); + } + + @Override + protected void doRun() { + safeAwait(startBarrier); + safeAwait(finishLatch); + } + + @Override + public boolean isForceExecution() { + return true; + } + }; + for (int i = 0; i < threadCount; i++) { + threadPool.executor(ThreadPool.Names.WRITE).execute(blockingTask); + } + safeAwait(startBarrier); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index 039a596f53b38..38eef4f720623 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -19,14 +19,19 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; +import org.elasticsearch.indices.recovery.RecoveryFilesInfoRequest; import org.elasticsearch.node.RecoverySettingsChunkSizePlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.TransportService; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -34,6 +39,7 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; import static org.elasticsearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -72,16 +78,14 @@ public void testCancelRecoveryAndResume() throws Exception { // we use 2 nodes a lucky and unlucky one // the lucky one holds the primary // the unlucky one gets the replica and the truncated leftovers - NodeStats primariesNode = dataNodeStats.get(0); - NodeStats unluckyNode = dataNodeStats.get(1); + String primariesNode = dataNodeStats.get(0).getNode().getName(); + String unluckyNode = dataNodeStats.get(1).getNode().getName(); // create the index and prevent allocation on any other nodes than the lucky one // we have no replicas so far and make sure that we allocate the primary on the lucky node assertAcked( prepareCreate("test").setMapping("field1", "type=text", "the_id", "type=text") - .setSettings( - indexSettings(numberOfShards(), 0).put("index.routing.allocation.include._name", primariesNode.getNode().getName()) - ) + .setSettings(indexSettings(numberOfShards(), 0).put("index.routing.allocation.include._name", primariesNode)) ); // only allocate on the lucky node // index some docs and check if they are coming back @@ -102,20 +106,54 @@ public void testCancelRecoveryAndResume() throws Exception { indicesAdmin().prepareFlush().setForce(true).get(); // double flush to create safe commit in case of async durability indicesAdmin().prepareForceMerge().setMaxNumSegments(1).setFlush(true).get(); + // We write some garbage into the shard directory so that we can verify that it is cleaned 
up before we resend. + // Cleanup helps prevent recovery from failing due to lack of space from garbage left over from a previous + // recovery that crashed during file transmission. #104473 + // We can't look for the presence of the recovery temp files themselves because they are automatically + // cleaned up on clean shutdown by MultiFileWriter. + final String GARBAGE_PREFIX = "recovery.garbage."; + final CountDownLatch latch = new CountDownLatch(1); final AtomicBoolean truncate = new AtomicBoolean(true); + + IndicesService unluckyIndices = internalCluster().getInstance(IndicesService.class, unluckyNode); + Function<ShardId, Path> getUnluckyIndexPath = (shardId) -> unluckyIndices.indexService(shardId.getIndex()) + .getShard(shardId.getId()) + .shardPath() + .resolveIndex(); + for (NodeStats dataNode : dataNodeStats) { MockTransportService.getInstance(dataNode.getNode().getName()) .addSendBehavior( - internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), + internalCluster().getInstance(TransportService.class, unluckyNode), (connection, requestId, action, request, options) -> { if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; logger.info("file chunk [{}] lastChunk: {}", req, req.lastChunk()); + // During the first recovery attempt (when truncate is set), write an extra garbage file once for each + // file transmitted. We get multiple chunks per file but only one is the last. + if (truncate.get() && req.lastChunk()) { + final var shardPath = getUnluckyIndexPath.apply(req.shardId()); + final var garbagePath = Files.createTempFile(shardPath, GARBAGE_PREFIX, null); + logger.info("writing garbage at: {}", garbagePath); + } if ((req.name().endsWith("cfs") || req.name().endsWith("fdt")) && req.lastChunk() && truncate.get()) { latch.countDown(); throw new RuntimeException("Caused some truncated files for fun and profit"); } + } else if (action.equals(PeerRecoveryTargetService.Actions.FILES_INFO)) { + // verify there are no garbage files present at the FILES_INFO stage of recovery. This precedes FILES_CHUNKS + // and so will run before garbage has been introduced on the first attempt, and before post-transfer cleanup + // has been performed on the second. 
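+ // List the shard's index directory and assert that no garbage files are present at this stage.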
+ final var shardPath = getUnluckyIndexPath.apply(((RecoveryFilesInfoRequest) request).shardId()); + try (var list = Files.list(shardPath).filter(path -> path.getFileName().toString().startsWith(GARBAGE_PREFIX))) { + final var garbageFiles = list.toArray(); + assertArrayEquals( + "garbage files should have been cleaned before file transmission", + new Path[0], + garbageFiles + ); + } } connection.sendRequest(requestId, action, request, options); } @@ -128,14 +166,14 @@ public void testCancelRecoveryAndResume() throws Exception { .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .put( "index.routing.allocation.include._name", // now allow allocation on all nodes - primariesNode.getNode().getName() + "," + unluckyNode.getNode().getName() + primariesNode + "," + unluckyNode ), "test" ); latch.await(); - // at this point we got some truncated left overs on the replica on the unlucky node + // at this point we got some truncated leftovers on the replica on the unlucky node // now we are allowing the recovery to allocate again and finish to see if we wipe the truncated files truncate.compareAndSet(true, false); ensureGreen("test"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java index 90326abb381d0..85f0e2cf7e3ff 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java @@ -398,7 +398,7 @@ public void testErrorCanRecoverOnRestart() throws Exception { FileSettingsService masterFileSettingsService = internalCluster().getInstance(FileSettingsService.class, masterNode); - assertTrue(masterFileSettingsService.watching()); + assertBusy(() -> assertTrue(masterFileSettingsService.watching())); assertFalse(dataFileSettingsService.watching()); writeJSONFile(masterNode, testErrorJSON, logger, versionCounter.incrementAndGet()); @@ -434,7 +434,7 @@ public void testNewErrorOnRestartReprocessing() throws Exception { FileSettingsService masterFileSettingsService = internalCluster().getInstance(FileSettingsService.class, masterNode); - assertTrue(masterFileSettingsService.watching()); + assertBusy(() -> assertTrue(masterFileSettingsService.watching())); assertFalse(dataFileSettingsService.watching()); writeJSONFile(masterNode, testErrorJSON, logger, versionCounter.incrementAndGet()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java index e9efab5934e52..76ea5b99a2a6b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java @@ -11,16 +11,13 @@ import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; -import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction; import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import 
org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xcontent.XContentType; import java.util.Arrays; import java.util.Collection; @@ -28,6 +25,7 @@ import java.util.Map; import java.util.function.Function; +import static org.elasticsearch.action.admin.cluster.storedscripts.StoredScriptIntegTestUtils.newPutStoredScriptTestRequest; import static org.elasticsearch.action.admin.cluster.storedscripts.StoredScriptIntegTestUtils.putJsonStoredScript; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -73,14 +71,9 @@ public void testBasics() { safeAwaitAndUnwrapFailure( IllegalArgumentException.class, AcknowledgedResponse.class, - l -> client().execute( - TransportPutStoredScriptAction.TYPE, - new PutStoredScriptRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).id("id#") - .content(new BytesArray(Strings.format(""" - {"script": {"lang": "%s", "source": "1"} } - """, LANG)), XContentType.JSON), - l - ) + l -> client().execute(TransportPutStoredScriptAction.TYPE, newPutStoredScriptTestRequest("id#", Strings.format(""" + {"script": {"lang": "%s", "source": "1"} } + """, LANG)), l) ).getMessage() ); } @@ -91,14 +84,9 @@ public void testMaxScriptSize() { safeAwaitAndUnwrapFailure( IllegalArgumentException.class, AcknowledgedResponse.class, - l -> client().execute( - TransportPutStoredScriptAction.TYPE, - new PutStoredScriptRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).id("foobar") - .content(new BytesArray(Strings.format(""" - {"script": { "lang": "%s", "source":"0123456789abcdef"} }\ - """, LANG)), XContentType.JSON), - l - ) + l -> client().execute(TransportPutStoredScriptAction.TYPE, newPutStoredScriptTestRequest("foobar", Strings.format(""" + {"script": { "lang": "%s", "source":"0123456789abcdef"} }\ + """, LANG)), l) ).getMessage() ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java index 544f0a08eaa6c..0aa28b9f9dbe8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java @@ -10,6 +10,7 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.sampler.random.InternalRandomSampler; import org.elasticsearch.search.aggregations.bucket.sampler.random.RandomSamplerAggregationBuilder; @@ -20,11 +21,13 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.IntStream; import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import 
static org.hamcrest.Matchers.lessThan; @@ -112,27 +115,28 @@ public void testRandomSamplerConsistentSeed() { } ); - for (int i = 0; i < NUM_SAMPLE_RUNS; i++) { - assertResponse( - prepareSearch("idx").setPreference("shard:0") - .addAggregation( - new RandomSamplerAggregationBuilder("sampler").setProbability(PROBABILITY) - .setSeed(0) - .subAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE)) - .subAggregation(avg("mean_numeric").field(NUMERIC_VALUE)) - .setShardSeed(42) - ), - response -> { - InternalRandomSampler sampler = response.getAggregations().get("sampler"); - double monotonicValue = ((Avg) sampler.getAggregations().get("mean_monotonic")).getValue(); - double numericValue = ((Avg) sampler.getAggregations().get("mean_numeric")).getValue(); - long docCount = sampler.getDocCount(); - assertEquals(monotonicValue, sampleMonotonicValue[0], tolerance); - assertEquals(numericValue, sampleNumericValue[0], tolerance); - assertEquals(docCount, sampledDocCount[0]); - } - ); - } + assertResponses(response -> { + InternalRandomSampler sampler = response.getAggregations().get("sampler"); + double monotonicValue = ((Avg) sampler.getAggregations().get("mean_monotonic")).getValue(); + double numericValue = ((Avg) sampler.getAggregations().get("mean_numeric")).getValue(); + long docCount = sampler.getDocCount(); + assertEquals(monotonicValue, sampleMonotonicValue[0], tolerance); + assertEquals(numericValue, sampleNumericValue[0], tolerance); + assertEquals(docCount, sampledDocCount[0]); + }, + IntStream.rangeClosed(0, NUM_SAMPLE_RUNS - 1) + .mapToObj( + num -> prepareSearch("idx").setPreference("shard:0") + .addAggregation( + new RandomSamplerAggregationBuilder("sampler").setProbability(PROBABILITY) + .setSeed(0) + .subAggregation(avg("mean_monotonic").field(MONOTONIC_VALUE)) + .subAggregation(avg("mean_numeric").field(NUMERIC_VALUE)) + .setShardSeed(42) + ) + ) + .toArray(SearchRequestBuilder[]::new) + ); } public void testRandomSampler() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java index 0d06856ca1088..4799b4bec0c8d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java @@ -64,6 +64,8 @@ private void testSearchAndRelocateConcurrently(final int numberOfReplicas) throw } indexRandom(true, indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()])); assertHitCount(prepareSearch(), (numDocs)); + // hold a copy of the node names before a new node is potentially added later + String[] nodeNamesBeforeClusterResize = internalCluster().getNodeNames(); final int numIters = scaledRandomIntBetween(5, 20); for (int i = 0; i < numIters; i++) { final AtomicBoolean stop = new AtomicBoolean(false); @@ -76,34 +78,37 @@ private void testSearchAndRelocateConcurrently(final int numberOfReplicas) throw public void run() { try { while (stop.get() == false) { - assertResponse(prepareSearch().setSize(numDocs), response -> { - if (response.getHits().getTotalHits().value() != numDocs) { - // if we did not search all shards but had no serious failures that is potentially fine - // if only the hit-count is wrong. this can happen if the cluster-state is behind when the - // request comes in. It's a small window but a known limitation. 
- if (response.getTotalShards() != response.getSuccessfulShards() - && Stream.of(response.getShardFailures()) - .allMatch(ssf -> ssf.getCause() instanceof NoShardAvailableActionException)) { - nonCriticalExceptions.add( - "Count is " - + response.getHits().getTotalHits().value() - + " but " - + numDocs - + " was expected. " - + formatShardStatus(response) - ); - } else { - assertHitCount(response, numDocs); + assertResponse( + client(randomFrom(nodeNamesBeforeClusterResize)).prepareSearch().setSize(numDocs), + response -> { + if (response.getHits().getTotalHits().value() != numDocs) { + // if we did not search all shards but had no serious failures that is potentially fine + // if only the hit-count is wrong. this can happen if the cluster-state is behind when the + // request comes in. It's a small window but a known limitation. + if (response.getTotalShards() != response.getSuccessfulShards() + && Stream.of(response.getShardFailures()) + .allMatch(ssf -> ssf.getCause() instanceof NoShardAvailableActionException)) { + nonCriticalExceptions.add( + "Count is " + + response.getHits().getTotalHits().value() + + " but " + + numDocs + + " was expected. " + + formatShardStatus(response) + ); + } else { + assertHitCount(response, numDocs); + } } - } - final SearchHits sh = response.getHits(); - assertThat( - "Expected hits to be the same size the actual hits array", - sh.getTotalHits().value(), - equalTo((long) (sh.getHits().length)) - ); - }); + final SearchHits sh = response.getHits(); + assertThat( + "Expected hits to be the same size as the actual hits array", + sh.getTotalHits().value(), + equalTo((long) (sh.getHits().length)) + ); + } + ); // this is the more critical check: catching the case where the actual hit array has a different size than the // actual number of hits. 
} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java index 5d2d5c917415a..cb4d0681cdb23 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java @@ -63,6 +63,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -189,7 +190,6 @@ public void testProxyConnectionDisconnect() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/108061") public void testCancel() throws Exception { assertAcked(client(LOCAL_CLUSTER).admin().indices().prepareCreate("demo")); indexDocs(client(LOCAL_CLUSTER), "demo"); @@ -307,7 +307,7 @@ public void testCancel() throws Exception { } }); - RuntimeException e = expectThrows(RuntimeException.class, () -> queryFuture.result()); + ExecutionException e = expectThrows(ExecutionException.class, () -> queryFuture.result()); assertNotNull(e); assertNotNull(e.getCause()); Throwable t = ExceptionsHelper.unwrap(e, TaskCancelledException.class); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java index c796522eda0e8..b0faeeb295e33 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesIT.java @@ -33,6 +33,7 @@ import static org.elasticsearch.index.query.QueryBuilders.wrapperQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasKey; @@ -105,54 +106,32 @@ public void testSimpleMatchedQueryFromTopLevelFilter() throws Exception { prepareIndex("test").setId("3").setSource("name", "test").get(); refresh(); - assertResponse( + assertResponses(response -> { + assertHitCount(response, 3L); + for (SearchHit hit : response.getHits()) { + if (hit.getId().equals("1")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); + assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); + } else if (hit.getId().equals("2") || hit.getId().equals("3")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); + } else { + fail("Unexpected document returned with id " + hit.getId()); + } + } + }, prepareSearch().setQuery(matchAllQuery()) .setPostFilter( boolQuery().should(termQuery("name", "test").queryName("name")).should(termQuery("title", "title1").queryName("title")) ), - 
response -> { - assertHitCount(response, 3L); - for (SearchHit hit : response.getHits()) { - if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); - assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); - assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); - } else if (hit.getId().equals("2") || hit.getId().equals("3")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); - assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); - } else { - fail("Unexpected document returned with id " + hit.getId()); - } - } - } - ); - - assertResponse( prepareSearch().setQuery(matchAllQuery()) .setPostFilter( boolQuery().should(termQuery("name", "test").queryName("name")).should(termQuery("title", "title1").queryName("title")) - ), - response -> { - assertHitCount(response, 3L); - for (SearchHit hit : response.getHits()) { - if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); - assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); - assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); - } else if (hit.getId().equals("2") || hit.getId().equals("3")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); - assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); - } else { - fail("Unexpected document returned with id " + hit.getId()); - } - } - } + ) ); } @@ -165,43 +144,25 @@ public void testSimpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Ex prepareIndex("test").setId("3").setSource("name", "test", "title", "title3").get(); refresh(); - assertResponse( + assertResponses(response -> { + assertHitCount(response, 3L); + for (SearchHit hit : response.getHits()) { + if (hit.getId().equals("1") || hit.getId().equals("2") || hit.getId().equals("3")) { + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); + assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); + } else { + fail("Unexpected document returned with id " + hit.getId()); + } + } + }, prepareSearch().setQuery( boolQuery().must(matchAllQuery()).filter(termsQuery("title", "title1", "title2", "title3").queryName("title")) ).setPostFilter(termQuery("name", "test").queryName("name")), - response -> { - assertHitCount(response, 3L); - for (SearchHit hit : response.getHits()) { - if (hit.getId().equals("1") || hit.getId().equals("2") || hit.getId().equals("3")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); - assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); - assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); - } else { - fail("Unexpected document returned with id " + hit.getId()); - } - } - } - ); - - assertResponse( prepareSearch().setQuery(termsQuery("title", "title1", "title2", 
"title3").queryName("title")) - .setPostFilter(matchQuery("name", "test").queryName("name")), - response -> { - assertHitCount(response, 3L); - for (SearchHit hit : response.getHits()) { - if (hit.getId().equals("1") || hit.getId().equals("2") || hit.getId().equals("3")) { - assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); - assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); - assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); - assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); - } else { - fail("Unexpected document returned with id " + hit.getId()); - } - } - } + .setPostFilter(matchQuery("name", "test").queryName("name")) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 0805d0f366b0f..36580ebda8aee 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -97,6 +97,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNotHighlighted; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; @@ -596,40 +597,24 @@ public void testSourceLookupHighlightingUsingPostingsHighlighter() throws Except } indexRandom(true, indexRequestBuilders); - assertResponse( + assertResponses(response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + equalTo("This is a test on the highlighting bug present in elasticsearch. Hopefully it works.") + ); + assertHighlight(response, i, "title", 1, 2, equalTo("This is the second bug to perform highlighting on.")); + } + }, prepareSearch().setQuery(matchQuery("title", "bug")) // asking for the whole field to be highlighted .highlighter(new HighlightBuilder().field("title", -1, 0)), - response -> { - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight( - response, - i, - "title", - 0, - equalTo("This is a test on the highlighting bug present in elasticsearch. Hopefully it works.") - ); - assertHighlight(response, i, "title", 1, 2, equalTo("This is the second bug to perform highlighting on.")); - } - } - ); - - assertResponse( prepareSearch().setQuery(matchQuery("title", "bug")) // sentences will be generated out of each value - .highlighter(new HighlightBuilder().field("title")), - response -> { - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight( - response, - i, - "title", - 0, - equalTo("This is a test on the highlighting bug present in elasticsearch. 
Hopefully it works.") - ); - assertHighlight(response, i, "title", 1, 2, equalTo("This is the second bug to perform highlighting on.")); - } - } + .highlighter(new HighlightBuilder().field("title")) ); assertResponse( @@ -792,27 +777,31 @@ public void testPlainHighlighterOrder() throws Exception { refresh(); { - // fragments should be in order of appearance by default - SearchSourceBuilder source = searchSource().query(matchQuery("field1", "brown dog")) - .highlighter(highlight().highlighterType("plain").field("field1").preTags("").postTags("").fragmentSize(25)); - - assertResponse(prepareSearch("test").setSource(source), response -> { - - assertHighlight(response, 0, "field1", 0, 3, equalTo("The quick brown fox")); - assertHighlight(response, 0, "field1", 1, 3, equalTo(" jumps over the lazy brown dog")); - assertHighlight(response, 0, "field1", 2, 3, equalTo(" dog doesn't care")); - }); - // lets be explicit about the order - source = searchSource().query(matchQuery("field1", "brown dog")) - .highlighter( - highlight().highlighterType("plain").field("field1").order("none").preTags("").postTags("").fragmentSize(25) - ); - - assertResponse(prepareSearch("test").setSource(source), response -> { + assertResponses(response -> { assertHighlight(response, 0, "field1", 0, 3, equalTo("The quick brown fox")); assertHighlight(response, 0, "field1", 1, 3, equalTo(" jumps over the lazy brown dog")); assertHighlight(response, 0, "field1", 2, 3, equalTo(" dog doesn't care")); - }); + }, + // fragments should be in order of appearance by default + prepareSearch("test").setSource( + searchSource().query(matchQuery("field1", "brown dog")) + .highlighter( + highlight().highlighterType("plain").field("field1").preTags("").postTags("").fragmentSize(25) + ) + ), + // let's be explicit about the order + prepareSearch("test").setSource( + searchSource().query(matchQuery("field1", "brown dog")) + .highlighter( + highlight().highlighterType("plain") + .field("field1") + .order("none") + .preTags("") + .postTags("") + .fragmentSize(25) + ) + ) + ); } { // order by score @@ -1701,42 +1690,26 @@ public void testDisableFastVectorHighlighter() throws Exception { } ); - // Using plain highlighter instead of FVH - assertResponse( + assertResponses(response -> { + for (int i = 0; i < indexRequestBuilders.length; i++) { + assertHighlight( + response, + i, + "title", + 0, + 1, + equalTo("This is a test for the workaround for the fast vector highlighting SOLR-3724") + ); + } + }, + // Using plain highlighter instead of FVH prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) .highlighter(new HighlightBuilder().field("title", 50, 1, 10).highlighterType("plain")), - response -> { - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight( - response, - i, - "title", - 0, - 1, - equalTo("This is a test for the workaround for the fast vector highlighting SOLR-3724") - ); - } - } - ); - - // Using plain highlighter instead of FVH on the field level - assertResponse( + // Using plain highlighter instead of FVH on the field level prepareSearch().setQuery(matchPhraseQuery("title", "test for the workaround")) .highlighter( new HighlightBuilder().field(new HighlightBuilder.Field("title").highlighterType("plain")).highlighterType("plain") - ), - response -> { - for (int i = 0; i < indexRequestBuilders.length; i++) { - assertHighlight( - response, - i, - "title", - 0, - 1, - equalTo("This is a test for the workaround for the fast vector highlighting SOLR-3724") - ); - } - } + ) ); } @@ -1826,44 
+1799,29 @@ public void testPlainHighlightDifferentFragmenter() throws Exception { .get(); refresh(); - assertResponse( + assertResponses(response -> { + assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); + assertHighlight( + response, + 0, + "tags", + 1, + 2, + equalTo("here is another one that is very long tag and has the tag token near the end") + ); + }, prepareSearch("test").setQuery(QueryBuilders.matchPhraseQuery("tags", "long tag")) .highlighter( new HighlightBuilder().field( new HighlightBuilder.Field("tags").highlighterType("plain").fragmentSize(-1).numOfFragments(2).fragmenter("simple") ) ), - response -> { - assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); - assertHighlight( - response, - 0, - "tags", - 1, - 2, - equalTo("here is another one that is very long tag and has the tag token near the end") - ); - } - ); - - assertResponse( prepareSearch("test").setQuery(QueryBuilders.matchPhraseQuery("tags", "long tag")) .highlighter( new HighlightBuilder().field( new Field("tags").highlighterType("plain").fragmentSize(-1).numOfFragments(2).fragmenter("span") ) - ), - response -> { - assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); - assertHighlight( - response, - 0, - "tags", - 1, - 2, - equalTo("here is another one that is very long tag and has the tag token near the end") - ); - } + ) ); assertFailures( @@ -3627,15 +3585,16 @@ public void testWithNestedQuery() throws Exception { assertThat(field.fragments()[1].string(), equalTo("cow")); } ); - assertResponse( + assertResponses(response -> { + assertHitCount(response, 1); + HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo.text"); + assertThat(field.fragments().length, equalTo(1)); + assertThat(field.fragments()[0].string(), equalTo("brown shoes")); + }, prepareSearch().setQuery(nestedQuery("foo", prefixQuery("foo.text", "bro"), ScoreMode.None)) .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))), - response -> { - assertHitCount(response, 1); - HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo.text"); - assertThat(field.fragments().length, equalTo(1)); - assertThat(field.fragments()[0].string(), equalTo("brown shoes")); - } + prepareSearch().setQuery(nestedQuery("foo", matchPhrasePrefixQuery("foo.text", "bro"), ScoreMode.None)) + .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))) ); assertResponse( prepareSearch().setQuery(nestedQuery("foo", matchPhraseQuery("foo.text", "brown shoes"), ScoreMode.None)) @@ -3647,16 +3606,6 @@ public void testWithNestedQuery() throws Exception { assertThat(field.fragments()[0].string(), equalTo("brown shoes")); } ); - assertResponse( - prepareSearch().setQuery(nestedQuery("foo", matchPhrasePrefixQuery("foo.text", "bro"), ScoreMode.None)) - .highlighter(new HighlightBuilder().field(new Field("foo.text").highlighterType(type))), - response -> { - assertHitCount(response, 1); - HighlightField field = response.getHits().getAt(0).getHighlightFields().get("foo.text"); - assertThat(field.fragments().length, equalTo(1)); - assertThat(field.fragments()[0].string(), equalTo("brown shoes")); - } - ); } // For unified and fvh highlighters we just check that the nested query is correctly extracted diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java index 16e5e42e00c9f..0310af3685e3e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -65,6 +65,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -203,18 +204,16 @@ public void testStoredFields() throws Exception { assertThat(response.getHits().getAt(0).getFields().size(), equalTo(0)); assertThat(response.getHits().getAt(0).getFields().get("field2"), nullValue()); }); - assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("field3"), response -> { + assertResponses(response -> { assertThat(response.getHits().getTotalHits().value(), equalTo(1L)); assertThat(response.getHits().getHits().length, equalTo(1)); assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - }); - assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("*3"), response -> { - assertThat(response.getHits().getTotalHits().value(), equalTo(1L)); - assertThat(response.getHits().getHits().length, equalTo(1)); - assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); - assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - }); + }, + prepareSearch().setQuery(matchAllQuery()).addStoredField("field3"), + prepareSearch().setQuery(matchAllQuery()).addStoredField("*3"), + prepareSearch().setQuery(matchAllQuery()).addStoredField("f*3") + ); assertResponse( prepareSearch().setQuery(matchAllQuery()).addStoredField("*3").addStoredField("field1").addStoredField("field2"), response -> { @@ -232,12 +231,6 @@ public void testStoredFields() throws Exception { assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); assertThat(response.getHits().getAt(0).getFields().get("field1").getValue().toString(), equalTo("value1")); }); - assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("f*3"), response -> { - assertThat(response.getHits().getTotalHits().value(), equalTo(1L)); - assertThat(response.getHits().getHits().length, equalTo(1)); - assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); - assertThat(response.getHits().getAt(0).getFields().get("field3").getValue().toString(), equalTo("value3")); - }); assertResponse(prepareSearch().setQuery(matchAllQuery()).addStoredField("*"), response -> { assertThat(response.getHits().getTotalHits().value(), equalTo(1L)); assertThat(response.getHits().getHits().length, equalTo(1)); @@ -865,47 +858,7 @@ public void testDocValueFields() throws Exception { if (randomBoolean()) { builder.addDocValueField("*_field"); } - assertResponse(builder, response -> { - assertThat(response.getHits().getTotalHits().value(), equalTo(1L)); - assertThat(response.getHits().getHits().length, equalTo(1)); - Set 
fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); - assertThat( - fields, - equalTo( - newHashSet( - "byte_field", - "short_field", - "integer_field", - "long_field", - "float_field", - "double_field", - "date_field", - "boolean_field", - "text_field", - "keyword_field", - "binary_field", - "ip_field" - ) - ) - ); - - assertThat(response.getHits().getAt(0).getFields().get("byte_field").getValues(), equalTo(List.of(1L))); - assertThat(response.getHits().getAt(0).getFields().get("short_field").getValues(), equalTo(List.of(2L))); - assertThat(response.getHits().getAt(0).getFields().get("integer_field").getValues(), equalTo(List.of(3L))); - assertThat(response.getHits().getAt(0).getFields().get("long_field").getValues(), equalTo(List.of(4L))); - assertThat(response.getHits().getAt(0).getFields().get("float_field").getValues(), equalTo(List.of(5.0))); - assertThat(response.getHits().getAt(0).getFields().get("double_field").getValues(), equalTo(List.of(6.0d))); - assertThat( - response.getHits().getAt(0).getFields().get("date_field").getValue(), - equalTo(DateFormatter.forPattern("date_optional_time").format(date)) - ); - assertThat(response.getHits().getAt(0).getFields().get("boolean_field").getValues(), equalTo(List.of(true))); - assertThat(response.getHits().getAt(0).getFields().get("text_field").getValues(), equalTo(List.of("foo"))); - assertThat(response.getHits().getAt(0).getFields().get("keyword_field").getValues(), equalTo(List.of("foo"))); - assertThat(response.getHits().getAt(0).getFields().get("binary_field").getValues(), equalTo(List.of("KmQ="))); - assertThat(response.getHits().getAt(0).getFields().get("ip_field").getValues(), equalTo(List.of("::1"))); - }); - assertResponse(prepareSearch().setQuery(matchAllQuery()).addDocValueField("*field"), response -> { + assertResponses(response -> { assertThat(response.getHits().getTotalHits().value(), equalTo(1L)); assertThat(response.getHits().getHits().length, equalTo(1)); Set fields = new HashSet<>(response.getHits().getAt(0).getFields().keySet()); @@ -944,7 +897,7 @@ public void testDocValueFields() throws Exception { assertThat(response.getHits().getAt(0).getFields().get("keyword_field").getValues(), equalTo(List.of("foo"))); assertThat(response.getHits().getAt(0).getFields().get("binary_field").getValues(), equalTo(List.of("KmQ="))); assertThat(response.getHits().getAt(0).getFields().get("ip_field").getValues(), equalTo(List.of("::1"))); - }); + }, builder, prepareSearch().setQuery(matchAllQuery()).addDocValueField("*field")); assertResponse( prepareSearch().setQuery(matchAllQuery()) .addDocValueField("byte_field", "#.0") diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java index 76384253282de..9988624f6a677 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java @@ -51,6 +51,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.anyOf; @@ -135,64 +136,21 @@ public void testDistanceScoreGeoLinGaussExp() throws Exception { lonlat.add(20f); lonlat.add(11f); - assertHitCount( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) - ), - (numDummyDocs + 2) - ); - - assertResponse( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().query(functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km")))) - ), - response -> { - assertHitCount(response, (numDummyDocs + 2)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - } - ); - // Test Exp - - assertHitCount( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) - ), - (numDummyDocs + 2) - ); - - assertResponse( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().query(functionScoreQuery(baseQuery, linearDecayFunction("loc", lonlat, "1000km")))) - ), - response -> { - assertHitCount(response, (numDummyDocs + 2)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - } - ); - - // Test Lin - - assertHitCount( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH).source(searchSource().query(baseQuery)) - ), - (numDummyDocs + 2) - ); - - assertResponse( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().query(functionScoreQuery(baseQuery, exponentialDecayFunction("loc", lonlat, "1000km")))) - ), - response -> { - assertHitCount(response, (numDummyDocs + 2)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - } + assertResponses(response -> { + assertHitCount(response, (numDummyDocs + 2)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertHitCount( + (numDummyDocs + 2), + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH).setSource(searchSource().query(baseQuery)) + ); + }, + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setSource(searchSource().query(functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km")))), + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setSource(searchSource().query(functionScoreQuery(baseQuery, linearDecayFunction("loc", lonlat, "1000km")))), + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setSource(searchSource().query(functionScoreQuery(baseQuery, exponentialDecayFunction("loc", lonlat, "1000km")))) ); } @@ -234,77 +192,46 @@ public void testDistanceScoreGeoLinGaussExpWithOffset() throws Exception { indexRandom(true, indexBuilders); - // Test Gauss - - assertResponse( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().size(numDummyDocs + 2) - .query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 1.0, 5.0, 1.0)).boostMode( - CombineFunction.REPLACE - ) - ) - ) - 
), - response -> { - SearchHits sh = response.getHits(); - assertThat(sh.getTotalHits().value(), equalTo((long) (numDummyDocs + 2))); - assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); - for (int i = 0; i < numDummyDocs; i++) { - assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3))); - } + assertResponses(response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value(), equalTo((long) (numDummyDocs + 2))); + assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); + assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); + for (int i = 0; i < numDummyDocs; i++) { + assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3))); } - ); - - // Test Exp - - assertResponse( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().size(numDummyDocs + 2) - .query( - functionScoreQuery(termQuery("test", "value"), exponentialDecayFunction("num", 1.0, 5.0, 1.0)).boostMode( - CombineFunction.REPLACE - ) + }, + // Test Gauss + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setSource( + searchSource().size(numDummyDocs + 2) + .query( + functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 1.0, 5.0, 1.0)).boostMode( + CombineFunction.REPLACE ) - ) - ), - response -> { - SearchHits sh = response.getHits(); - assertThat(sh.getTotalHits().value(), equalTo((long) (numDummyDocs + 2))); - assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); - for (int i = 0; i < numDummyDocs; i++) { - assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3))); - } - } - ); - // Test Lin - assertResponse( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().size(numDummyDocs + 2) - .query( - functionScoreQuery(termQuery("test", "value"), linearDecayFunction("num", 1.0, 20.0, 1.0)).boostMode( - CombineFunction.REPLACE - ) + ) + ), + // Test Exp + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setSource( + searchSource().size(numDummyDocs + 2) + .query( + functionScoreQuery(termQuery("test", "value"), exponentialDecayFunction("num", 1.0, 5.0, 1.0)).boostMode( + CombineFunction.REPLACE ) - ) - ), - response -> { - SearchHits sh = response.getHits(); - assertThat(sh.getTotalHits().value(), equalTo((long) (numDummyDocs + 2))); - assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2"))); - assertThat(sh.getAt(1).getScore(), equalTo(sh.getAt(0).getScore())); - } + ) + ), + // Test Lin + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setSource( + searchSource().size(numDummyDocs + 2) + .query( + functionScoreQuery(termQuery("test", "value"), linearDecayFunction("num", 1.0, 20.0, 1.0)).boostMode( + CombineFunction.REPLACE + ) + ) + ) ); } @@ -355,54 +282,38 @@ public void testBoostModeSettingWorks() throws Exception { ); indexRandom(true, false, indexBuilders); // force no dummy docs - // Test Gauss List lonlat = new ArrayList<>(); lonlat.add(20f); lonlat.add(11f); - assertResponse( - 
client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode( - CombineFunction.MULTIPLY - ) + assertResponses(response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value(), equalTo((long) (2))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat(sh.getAt(1).getId(), equalTo("2")); + }, + // Test Gauss + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setSource( + searchSource().query( + functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode( + CombineFunction.MULTIPLY ) ) - ), - response -> { - SearchHits sh = response.getHits(); - assertThat(sh.getTotalHits().value(), equalTo((long) (2))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); - } - ); - // Test Exp - assertResponse( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source(searchSource().query(termQuery("test", "value"))) - ), - response -> { - SearchHits sh = response.getHits(); - assertThat(sh.getTotalHits().value(), equalTo((long) (2))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat(sh.getAt(1).getId(), equalTo("2")); - } + ), + // Test Exp + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH).setSource(searchSource().query(termQuery("test", "value"))) ); assertResponse( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode( - CombineFunction.REPLACE - ) + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setSource( + searchSource().query( + functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode( + CombineFunction.REPLACE ) ) - ), + ), response -> { SearchHits sh = response.getHits(); assertThat(sh.getTotalHits().value(), equalTo((long) (2))); @@ -410,7 +321,6 @@ public void testBoostModeSettingWorks() throws Exception { assertThat(sh.getAt(1).getId(), equalTo("1")); } ); - } public void testParseGeoPoint() throws Exception { @@ -447,44 +357,30 @@ public void testParseGeoPoint() throws Exception { constantScoreQuery(termQuery("test", "value")), ScoreFunctionBuilders.weightFactorFunction(randomIntBetween(1, 10)) ); - GeoPoint point = new GeoPoint(20, 11); - assertResponse( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("loc", point, "1000km")).boostMode( - CombineFunction.REPLACE - ) + + assertResponses(response -> { + SearchHits sh = response.getHits(); + assertThat(sh.getTotalHits().value(), equalTo((long) (1))); + assertThat(sh.getAt(0).getId(), equalTo("1")); + assertThat((double) sh.getAt(0).getScore(), closeTo(1.0f, 1.e-5)); + }, + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setSource( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("loc", new GeoPoint(20, 11), "1000km")).boostMode( + CombineFunction.REPLACE ) ) - ), - response -> { - SearchHits sh = response.getHits(); - assertThat(sh.getTotalHits().value(), equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) 
sh.getAt(0).getScore(), closeTo(1.0, 1.e-5)); - } - ); - // this is equivalent to new GeoPoint(20, 11); just flipped so scores must be same - float[] coords = { 11, 20 }; - assertResponse( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("loc", coords, "1000km")).boostMode( - CombineFunction.REPLACE - ) + ), + // new float[] {11,20} is equivalent to new GeoPoint(20, 11); just flipped so scores must be same + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setSource( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("loc", new float[] { 11, 20 }, "1000km")).boostMode( + CombineFunction.REPLACE ) ) - ), - response -> { - SearchHits sh = response.getHits(); - assertThat(sh.getTotalHits().value(), equalTo((long) (1))); - assertThat(sh.getAt(0).getId(), equalTo("1")); - assertThat((double) sh.getAt(0).getScore(), closeTo(1.0f, 1.e-5)); - } + ) ); } @@ -516,16 +412,14 @@ public void testCombineModes() throws Exception { ); // decay score should return 0.5 for this function and baseQuery should return 2.0f as it's score assertResponse( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( - CombineFunction.MULTIPLY - ) + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setSource( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.MULTIPLY ) ) - ), + ), response -> { SearchHits sh = response.getHits(); assertThat(sh.getTotalHits().value(), equalTo((long) (1))); @@ -534,16 +428,14 @@ public void testCombineModes() throws Exception { } ); assertResponse( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( - CombineFunction.REPLACE - ) + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setSource( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( + CombineFunction.REPLACE ) ) - ), + ), response -> { SearchHits sh = response.getHits(); assertThat(sh.getTotalHits().value(), equalTo((long) (1))); @@ -552,16 +444,12 @@ public void testCombineModes() throws Exception { } ); assertResponse( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( - CombineFunction.SUM - ) - ) - ) - ), + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setSource( + (searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.SUM) + )) + ), response -> { SearchHits sh = response.getHits(); assertThat(sh.getTotalHits().value(), equalTo((long) (1))); @@ -576,16 +464,12 @@ public void testCombineModes() throws Exception { ); assertResponse( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 
0.5)).boostMode( - CombineFunction.AVG - ) - ) + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setSource( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.AVG) ) - ), + ), response -> { SearchHits sh = response.getHits(); assertThat(sh.getTotalHits().value(), equalTo((long) (1))); @@ -594,16 +478,12 @@ public void testCombineModes() throws Exception { } ); assertResponse( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( - CombineFunction.MIN - ) - ) + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setSource( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.MIN) ) - ), + ), response -> { SearchHits sh = response.getHits(); assertThat(sh.getTotalHits().value(), equalTo((long) (1))); @@ -612,16 +492,12 @@ public void testCombineModes() throws Exception { } ); assertResponse( - client().search( - new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH) - .source( - searchSource().query( - functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode( - CombineFunction.MAX - ) - ) + prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) + .setSource( + searchSource().query( + functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.MAX) ) - ), + ), response -> { SearchHits sh = response.getHits(); assertThat(sh.getTotalHits().value(), equalTo((long) (1))); @@ -1128,7 +1004,7 @@ public void testMultiFieldOptions() throws Exception { indexRandom(true, doc1, doc2); - assertResponse(client().search(new SearchRequest(new String[] {}).source(searchSource().query(baseQuery))), response -> { + assertResponse(prepareSearch().setSource(searchSource().query(baseQuery)), response -> { assertSearchHits(response, "1", "2"); SearchHits sh = response.getHits(); assertThat(sh.getTotalHits().value(), equalTo((long) (2))); @@ -1138,11 +1014,9 @@ public void testMultiFieldOptions() throws Exception { lonlat.add(20f); lonlat.add(10f); assertResponse( - client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MIN)) - ) + prepareSearch().setSource( + searchSource().query( + functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MIN)) ) ), response -> { @@ -1154,11 +1028,9 @@ public void testMultiFieldOptions() throws Exception { } ); assertResponse( - client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MAX)) - ) + prepareSearch().setSource( + searchSource().query( + functionScoreQuery(baseQuery, gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode(MultiValueMode.MAX)) ) ), response -> { @@ -1180,11 +1052,9 @@ public void testMultiFieldOptions() throws Exception { indexRandom(true, doc1, doc2); assertResponse( - client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery(baseQuery, linearDecayFunction("num", "0", 
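    // Sketch (editorial): the testCombineModes hunks above keep one request per
    // CombineFunction. Per the hunk's own comment the base query scores 2.0f and the
    // gauss function decays to 0.5, so the expected boost-mode results should be:
    // MULTIPLY 1.0, REPLACE 0.5, SUM 2.5, AVG 1.25, MIN 0.5, MAX 2.0. Shown here for
    // the SUM case; baseQueryBuilder is the hunk's local variable, assumed in scope.
    assertResponse(
        prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH)
            .setSource(
                searchSource().query(
                    functionScoreQuery(baseQueryBuilder, gaussDecayFunction("num", 0.0, 1.0, null, 0.5)).boostMode(CombineFunction.SUM)
                )
            ),
        response -> assertThat((double) response.getHits().getAt(0).getScore(), closeTo(2.5, 1.e-5))
    );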
"10").setMultiValueMode(MultiValueMode.SUM)) - ) + prepareSearch().setSource( + searchSource().query( + functionScoreQuery(baseQuery, linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.SUM)) ) ), response -> { @@ -1197,11 +1067,9 @@ public void testMultiFieldOptions() throws Exception { } ); assertResponse( - client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery(baseQuery, linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.AVG)) - ) + prepareSearch().setSource( + searchSource().query( + functionScoreQuery(baseQuery, linearDecayFunction("num", "0", "10").setMultiValueMode(MultiValueMode.AVG)) ) ), response -> { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java index a38c9dc916056..e90740c042de3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java @@ -43,7 +43,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -137,41 +137,25 @@ public void testMinScoreFunctionScoreBasic() throws Exception { ensureYellow(); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['random_score']", Collections.emptyMap()); - assertResponse( - client().search( - new SearchRequest(new String[] {}).source( - searchSource().query(functionScoreQuery(scriptFunction(script)).setMinScore(minScore)) - ) - ), - response -> { - if (score < minScore) { - assertThat(response.getHits().getTotalHits().value(), is(0L)); - } else { - assertThat(response.getHits().getTotalHits().value(), is(1L)); - } - } - ); - assertResponse( - client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery( - new MatchAllQueryBuilder(), - new FilterFunctionBuilder[] { - new FilterFunctionBuilder(scriptFunction(script)), - new FilterFunctionBuilder(scriptFunction(script)) } - ).scoreMode(FunctionScoreQuery.ScoreMode.AVG).setMinScore(minScore) - ) - ) - ), - response -> { - if (score < minScore) { - assertThat(response.getHits().getTotalHits().value(), is(0L)); - } else { - assertThat(response.getHits().getTotalHits().value(), is(1L)); - } + assertResponses(response -> { + if (score < minScore) { + assertThat(response.getHits().getTotalHits().value(), is(0L)); + } else { + assertThat(response.getHits().getTotalHits().value(), is(1L)); } + }, + prepareSearch().setSource(searchSource().query(functionScoreQuery(scriptFunction(script)).setMinScore(minScore))), + prepareSearch().setSource( + searchSource().query( + functionScoreQuery( + new MatchAllQueryBuilder(), + new FilterFunctionBuilder[] { + new FilterFunctionBuilder(scriptFunction(script)), + new FilterFunctionBuilder(scriptFunction(script)) } + 
).scoreMode(FunctionScoreQuery.ScoreMode.AVG).setMinScore(minScore) + ) + ) ); } @@ -195,31 +179,20 @@ public void testMinScoreFunctionScoreManyDocsAndRandomMinScore() throws IOExcept final int finalNumMatchingDocs = numMatchingDocs; - assertResponse( - client().search( - new SearchRequest(new String[] {}).source( - searchSource().query(functionScoreQuery(scriptFunction(script)).setMinScore(minScore)).size(numDocs) - ) - ), - response -> assertMinScoreSearchResponses(numDocs, response, finalNumMatchingDocs) - ); - - assertResponse( - client().search( - new SearchRequest(new String[] {}).source( - searchSource().query( - functionScoreQuery( - new MatchAllQueryBuilder(), - new FilterFunctionBuilder[] { - new FilterFunctionBuilder(scriptFunction(script)), - new FilterFunctionBuilder(scriptFunction(script)) } - ).scoreMode(FunctionScoreQuery.ScoreMode.AVG).setMinScore(minScore) - ).size(numDocs) - ) - ), - response -> assertMinScoreSearchResponses(numDocs, response, finalNumMatchingDocs) + assertResponses( + response -> assertMinScoreSearchResponses(numDocs, response, finalNumMatchingDocs), + prepareSearch().setSource(searchSource().query(functionScoreQuery(scriptFunction(script)).setMinScore(minScore)).size(numDocs)), + prepareSearch().setSource( + searchSource().query( + functionScoreQuery( + new MatchAllQueryBuilder(), + new FilterFunctionBuilder[] { + new FilterFunctionBuilder(scriptFunction(script)), + new FilterFunctionBuilder(scriptFunction(script)) } + ).scoreMode(FunctionScoreQuery.ScoreMode.AVG).setMinScore(minScore) + ).size(numDocs) + ) ); - } protected void assertMinScoreSearchResponses(int numDocs, SearchResponse searchResponse, int numMatchingDocs) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java index 9fed4ead8c248..a7efb2fe0e68b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java @@ -69,6 +69,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThirdHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; @@ -149,33 +150,24 @@ public void testRescorePhrase() throws Exception { 5 ), response -> { - assertThat(response.getHits().getTotalHits().value(), equalTo(3L)); + assertHitCount(response, 3); assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); - assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(response.getHits().getHits()[1].getId(), equalTo("3")); - assertThat(response.getHits().getHits()[2].getId(), equalTo("2")); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("3")); + assertThirdHit(response, hasId("2")); } ); - assertResponse( + assertResponses(response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getMaxScore(), 
equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("1")); + assertSecondHit(response, hasId("2")); + assertThirdHit(response, hasId("3")); + }, prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) .setRescorer(new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown").slop(3)), 5), - response -> { - assertHitCount(response, 3); - assertFirstHit(response, hasId("1")); - assertSecondHit(response, hasId("2")); - assertThirdHit(response, hasId("3")); - } - ); - assertResponse( prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) - .setRescorer(new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown")), 5), - response -> { - assertHitCount(response, 3); - assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); - assertFirstHit(response, hasId("1")); - assertSecondHit(response, hasId("2")); - assertThirdHit(response, hasId("3")); - } + .setRescorer(new QueryRescorerBuilder(matchPhraseQuery("field1", "the quick brown")), 5) ); } @@ -212,7 +204,15 @@ public void testMoreDocs() throws Exception { prepareIndex("test").setId("11").setSource("field1", "2st street boston massachusetts").get(); prepareIndex("test").setId("12").setSource("field1", "3st street boston massachusetts").get(); indicesAdmin().prepareRefresh("test").get(); - assertResponse( + + assertResponses(response -> { + assertThat(response.getHits().getHits().length, equalTo(5)); + assertHitCount(response, 9); + assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); + assertFirstHit(response, hasId("2")); + assertSecondHit(response, hasId("6")); + assertThirdHit(response, hasId("3")); + }, prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR)) .setFrom(0) .setSize(5) @@ -221,16 +221,6 @@ public void testMoreDocs() throws Exception { .setRescoreQueryWeight(2.0f), 20 ), - response -> { - assertThat(response.getHits().getHits().length, equalTo(5)); - assertHitCount(response, 9); - assertFirstHit(response, hasId("2")); - assertSecondHit(response, hasId("6")); - assertThirdHit(response, hasId("3")); - } - ); - - assertResponse( prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR)) .setFrom(0) .setSize(5) @@ -239,15 +229,7 @@ public void testMoreDocs() throws Exception { new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f) .setRescoreQueryWeight(2.0f), 20 - ), - response -> { - assertThat(response.getHits().getHits().length, equalTo(5)); - assertHitCount(response, 9); - assertThat(response.getHits().getMaxScore(), equalTo(response.getHits().getHits()[0].getScore())); - assertFirstHit(response, hasId("2")); - assertSecondHit(response, hasId("6")); - assertThirdHit(response, hasId("3")); - } + ) ); // Make sure non-zero from works: assertResponse( @@ -465,7 +447,8 @@ public void testEquivalence() throws Exception { .setFrom(0) .setSize(resultSize), plain -> { - assertResponse( + assertResponses( + rescored -> assertEquivalent(query, plain, rescored), prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) .setPreference("test") // ensure we hit the same shards for tie-breaking .setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR)) @@ -478,10 +461,6 @@ public void testEquivalence() throws Exception 
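    // Sketch (editorial): the rescorer hunks above vary setQueryWeight and
    // setRescoreQueryWeight. Assuming the default "total" score mode, each of the top
    // window-size hits is rescored to
    //     queryWeight * original score + rescoreQueryWeight * rescore-query score.
    // Constants reused from the testMoreDocs hunk above.
    assertResponse(
        prepareSearch().setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(Operator.OR))
            .setRescorer(
                new QueryRescorerBuilder(matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3)).setQueryWeight(0.6f)
                    .setRescoreQueryWeight(2.0f),
                20 // only the top 20 hits per shard are rescored
            ),
        response -> assertFirstHit(response, hasId("2"))
    );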
{ .setRescoreQueryWeight(0.0f), rescoreWindow ), - rescored -> assertEquivalent(query, plain, rescored) - ); // check equivalence - - assertResponse( prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH) .setPreference("test") // ensure we hit the same shards for tie-breaking .setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR)) @@ -492,8 +471,7 @@ public void testEquivalence() throws Exception { .setQueryWeight(1.0f) .setRescoreQueryWeight(1.0f), rescoreWindow - ), - rescored -> assertEquivalent(query, plain, rescored) + ) ); // check equivalence } ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java index 4688201c66201..8225386ed02d2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java @@ -44,6 +44,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -1149,39 +1150,54 @@ public void testSortNestedWithNestedFilter() throws Exception { // With nested filter NestedSortBuilder nestedSort = new NestedSortBuilder("parent.child"); nestedSort.setFilter(QueryBuilders.termQuery("parent.child.filter", true)); - assertResponse( + assertResponses(response -> { + assertHitCount(response, 3); + assertThat(response.getHits().getHits().length, equalTo(3)); + assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); + assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); + assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); + assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); + assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); + }, prepareSearch().setQuery(matchAllQuery()) .addSort(SortBuilders.fieldSort("parent.child.child_values").setNestedSort(nestedSort).order(SortOrder.ASC)), - response -> { - assertHitCount(response, 3); - assertThat(response.getHits().getHits().length, equalTo(3)); - assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - } - ); - // Nested path should be automatically detected, expect same results as above search request - assertResponse( prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("parent.child.child_values").setNestedSort(nestedSort).order(SortOrder.ASC)), - response -> { - assertHitCount(response, 3); - assertThat(response.getHits().getHits().length, 
equalTo(3)); - assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - } + .addSort( + SortBuilders.fieldSort("parent.child.child_obj.value") + .setNestedSort( + new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true)) + ) + .order(SortOrder.ASC) + ), + // Sort mode: sum with filter + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort( + new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true)) + ) + .sortMode(SortMode.SUM) + .order(SortOrder.ASC) + ), + // Sort mode: avg with filter + prepareSearch().setQuery(matchAllQuery()) + .addSort( + SortBuilders.fieldSort("parent.child.child_values") + .setNestedSort( + new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true)) + ) + .sortMode(SortMode.AVG) + .order(SortOrder.ASC) + ) ); - nestedSort.setFilter(QueryBuilders.termQuery("parent.filter", false)); assertResponse( prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort("parent.parent_values").setNestedSort(nestedSort).order(SortOrder.ASC)), + .addSort( + SortBuilders.fieldSort("parent.parent_values") + .setNestedSort(nestedSort.setFilter(QueryBuilders.termQuery("parent.filter", false))) + .order(SortOrder.ASC) + ), response -> { assertHitCount(response, 3); assertThat(response.getHits().getHits().length, equalTo(3)); @@ -1215,27 +1231,6 @@ public void testSortNestedWithNestedFilter() throws Exception { assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("6")); } ); - // Check if closest nested type is resolved - assertResponse( - prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_obj.value") - .setNestedSort( - new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true)) - ) - .order(SortOrder.ASC) - ), - response -> { - assertHitCount(response, 3); - assertThat(response.getHits().getHits().length, equalTo(3)); - assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - } - ); // Sort mode: sum assertResponse( prepareSearch().setQuery(matchAllQuery()) @@ -1275,28 +1270,6 @@ public void testSortNestedWithNestedFilter() throws Exception { assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("2")); } ); - // Sort mode: sum with filter - assertResponse( - prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort( - new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true)) - ) - 
.sortMode(SortMode.SUM) - .order(SortOrder.ASC) - ), - response -> { - assertHitCount(response, 3); - assertThat(response.getHits().getHits().length, equalTo(3)); - assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - } - ); // Sort mode: avg assertResponse( prepareSearch().setQuery(matchAllQuery()) @@ -1336,28 +1309,6 @@ public void testSortNestedWithNestedFilter() throws Exception { assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("1")); } ); - // Sort mode: avg with filter - assertResponse( - prepareSearch().setQuery(matchAllQuery()) - .addSort( - SortBuilders.fieldSort("parent.child.child_values") - .setNestedSort( - new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true)) - ) - .sortMode(SortMode.AVG) - .order(SortOrder.ASC) - ), - response -> { - assertHitCount(response, 3); - assertThat(response.getHits().getHits().length, equalTo(3)); - assertThat(response.getHits().getHits()[0].getId(), equalTo("1")); - assertThat(response.getHits().getHits()[0].getSortValues()[0].toString(), equalTo("1")); - assertThat(response.getHits().getHits()[1].getId(), equalTo("2")); - assertThat(response.getHits().getHits()[1].getSortValues()[0].toString(), equalTo("2")); - assertThat(response.getHits().getHits()[2].getId(), equalTo("3")); - assertThat(response.getHits().getHits()[2].getSortValues()[0].toString(), equalTo("3")); - } - ); } // Issue #9305 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java index 0fd2bd6f94770..69a9fd7fdd4c7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/MultiMatchQueryIT.java @@ -57,6 +57,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; @@ -301,27 +302,20 @@ public void testDefaults() throws ExecutionException, InterruptedException { ), response -> assertFirstHit(response, hasId("theother")) ); - assertResponse( + assertResponses(response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + }, prepareSearch("test").setQuery( randomizeType( multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").operator(Operator.AND).type(type) ) ), - response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("theone")); - } - ); - assertResponse( 
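    // Sketch (editorial): the SimpleNestedIT hunks fold the filtered nested-sort
    // variants into one assertResponses call. The key construct is a NestedSortBuilder
    // carrying its own filter, so only child documents matching the filter contribute
    // sort values, and sortMode decides how several qualifying values collapse into a
    // single sort key. Assumes the test's "parent.child" mapping.
    assertResponse(
        prepareSearch().setQuery(matchAllQuery())
            .addSort(
                SortBuilders.fieldSort("parent.child.child_values")
                    .setNestedSort(new NestedSortBuilder("parent.child").setFilter(QueryBuilders.termQuery("parent.child.filter", true)))
                    .sortMode(SortMode.AVG)
                    .order(SortOrder.ASC)
            ),
        response -> assertHitCount(response, 3)
    );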
prepareSearch("test").setQuery( randomizeType( multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").operator(Operator.AND).type(type) ) - ), - response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("theone")); - } + ) ); } @@ -629,7 +623,10 @@ public void testCrossFieldMode() throws ExecutionException, InterruptedException response -> assertFirstHit(response, hasId("theother")) ); - assertResponse( + assertResponses(response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + }, prepareSearch("test").setQuery( randomizeType( multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").type( @@ -637,12 +634,6 @@ public void testCrossFieldMode() throws ExecutionException, InterruptedException ).operator(Operator.AND) ) ), - response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("theone")); - } - ); - assertResponse( prepareSearch("test").setQuery( randomizeType( multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill").type( @@ -650,12 +641,6 @@ public void testCrossFieldMode() throws ExecutionException, InterruptedException ).analyzer("category").lenient(true).operator(Operator.AND) ) ), - response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("theone")); - } - ); - assertResponse( prepareSearch("test").setQuery( randomizeType( multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill", "int-field").type( @@ -663,25 +648,17 @@ public void testCrossFieldMode() throws ExecutionException, InterruptedException ).analyzer("category").lenient(true).operator(Operator.AND) ) ), - response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("theone")); - } - ); - assertResponse( prepareSearch("test").setQuery( randomizeType( multiMatchQuery("captain america 15", "skill", "full_name", "first_name", "last_name", "category", "int-field").type( MultiMatchQueryBuilder.Type.CROSS_FIELDS ).analyzer("category").lenient(true).operator(Operator.AND) ) - ), - response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("theone")); - } + ) ); - assertResponse( + + assertResponses( + response -> assertFirstHit(response, hasId("theone")), prepareSearch("test").setQuery( randomizeType( multiMatchQuery("captain america 15", "first_name", "last_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) @@ -689,71 +666,42 @@ public void testCrossFieldMode() throws ExecutionException, InterruptedException .analyzer("category") ) ), - response -> assertFirstHit(response, hasId("theone")) - ); - - assertResponse( prepareSearch("test").setQuery( randomizeType(multiMatchQuery("15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category")) ), - response -> assertFirstHit(response, hasId("theone")) - ); - - assertResponse( prepareSearch("test").setQuery( randomizeType(multiMatchQuery("25 15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category")) ), - response -> assertFirstHit(response, hasId("theone")) - ); - - assertResponse( prepareSearch("test").setQuery( randomizeType( multiMatchQuery("25 15", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category") ) ), - response -> assertFirstHit(response, hasId("theone")) - ); - - assertResponse( prepareSearch("test").setQuery( randomizeType( multiMatchQuery("25 15", "first_name", "int-field", 
"skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) .analyzer("category") ) ), - response -> assertFirstHit(response, hasId("theone")) - ); - - assertResponse( prepareSearch("test").setQuery( randomizeType( multiMatchQuery("25 15", "int-field", "skill", "first_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) .analyzer("category") ) ), - response -> assertFirstHit(response, hasId("theone")) - ); - - assertResponse( prepareSearch("test").setQuery( randomizeType( multiMatchQuery("25 15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) .analyzer("category") ) ), - response -> assertFirstHit(response, hasId("theone")) - ); - - assertResponse( prepareSearch("test").setQuery( randomizeType( multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type( MultiMatchQueryBuilder.Type.CROSS_FIELDS ).analyzer("category").operator(Operator.OR) ) - ), - response -> assertFirstHit(response, hasId("theone")) + ) ); // test group based on analyzer -- all fields are grouped into a cross field search @@ -770,8 +718,10 @@ public void testCrossFieldMode() throws ExecutionException, InterruptedException assertFirstHit(response, hasId("theone")); } ); + // counter example assertHitCount( + 0L, prepareSearch("test").setQuery( randomizeType( multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type( @@ -779,19 +729,13 @@ public void testCrossFieldMode() throws ExecutionException, InterruptedException ).operator(Operator.AND) ) ), - 0L - ); - - // counter example - assertHitCount( prepareSearch("test").setQuery( randomizeType( multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type( randomBoolean() ? MultiMatchQueryBuilder.Type.CROSS_FIELDS : MultiMatchQueryBuilder.DEFAULT_TYPE ).operator(Operator.AND) ) - ), - 0L + ) ); // test if boosts work @@ -828,68 +772,42 @@ public void testCrossFieldMode() throws ExecutionException, InterruptedException } ); // Test group based on numeric fields - assertResponse( + assertResponses(response -> { + assertHitCount(response, 1L); + assertFirstHit(response, hasId("theone")); + }, prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))), - response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("theone")); - } - ); - assertResponse( prepareSearch("test").setQuery( randomizeType(multiMatchQuery("15", "skill", "first_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) ), - response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("theone")); - } - ); - // Two numeric fields together caused trouble at one point! - assertResponse( + // Two numeric fields together caused trouble at one point! prepareSearch("test").setQuery( randomizeType(multiMatchQuery("15", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) ), - response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("theone")); - } - ); - assertResponse( prepareSearch("test").setQuery( randomizeType(multiMatchQuery("15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)) - ), - response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("theone")); - } + ) ); - assertResponse( + assertResponses(response -> { + /* + * Doesn't find the one because "alpha 15" isn't a number and we don't + * break on spaces. 
+ */ + assertHitCount(response, 1L); + assertFirstHit(response, hasId("ultimate1")); + }, prepareSearch("test").setQuery( randomizeType( multiMatchQuery("alpha 15", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).lenient(true) ) ), - response -> { - /* - * Doesn't find the one because "alpha 15" isn't a number and we don't - * break on spaces. - */ - assertHitCount(response, 1L); - assertFirstHit(response, hasId("ultimate1")); - } - ); - // Lenient wasn't always properly lenient with two numeric fields - assertResponse( + // Lenient wasn't always properly lenient with two numeric fields prepareSearch("test").setQuery( randomizeType( multiMatchQuery("alpha 15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) .lenient(true) ) - ), - response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("ultimate1")); - } + ) ); // Check that cross fields works with date fields assertResponse( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java index c8fe9498b156f..28d72518f516e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java @@ -30,6 +30,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -50,14 +51,10 @@ public void testBasicAllQuery() throws Exception { reqs.add(prepareIndex("test").setId("3").setSource("f3", "foo bar baz")); indexRandom(true, false, reqs); - assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo")), response -> { - assertHitCount(response, 2L); - assertHits(response.getHits(), "1", "3"); - }); - assertResponse(prepareSearch("test").setQuery(queryStringQuery("bar")), response -> { + assertResponses(response -> { assertHitCount(response, 2L); assertHits(response.getHits(), "1", "3"); - }); + }, prepareSearch("test").setQuery(queryStringQuery("foo")), prepareSearch("test").setQuery(queryStringQuery("bar"))); assertResponse(prepareSearch("test").setQuery(queryStringQuery("Bar")), response -> { assertHitCount(response, 3L); assertHits(response.getHits(), "1", "2", "3"); @@ -70,22 +67,18 @@ public void testWithDate() throws Exception { reqs.add(prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01")); indexRandom(true, false, reqs); - assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo bar")), response -> { + assertResponses(response -> { assertHits(response.getHits(), "1", "2"); assertHitCount(response, 2L); - }); + }, + prepareSearch("test").setQuery(queryStringQuery("foo bar")), + prepareSearch("test").setQuery(queryStringQuery("bar \"2015/09/02\"")), + prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\" \"2015/09/01\"")) + ); assertResponse(prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\"")), response -> { assertHits(response.getHits(), "1"); assertHitCount(response, 1L); }); - 
assertResponse(prepareSearch("test").setQuery(queryStringQuery("bar \"2015/09/02\"")), response -> { - assertHits(response.getHits(), "1", "2"); - assertHitCount(response, 2L); - }); - assertResponse(prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\" \"2015/09/01\"")), response -> { - assertHits(response.getHits(), "1", "2"); - assertHitCount(response, 2L); - }); } public void testWithLotsOfTypes() throws Exception { @@ -94,22 +87,18 @@ public void testWithLotsOfTypes() throws Exception { reqs.add(prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01", "f_float", "1.8", "f_ip", "127.0.0.2")); indexRandom(true, false, reqs); - assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo bar")), response -> { + assertResponses(response -> { assertHits(response.getHits(), "1", "2"); assertHitCount(response, 2L); - }); + }, + prepareSearch("test").setQuery(queryStringQuery("foo bar")), + prepareSearch("test").setQuery(queryStringQuery("127.0.0.2 \"2015/09/02\"")), + prepareSearch("test").setQuery(queryStringQuery("127.0.0.1 OR 1.8")) + ); assertResponse(prepareSearch("test").setQuery(queryStringQuery("\"2015/09/02\"")), response -> { assertHits(response.getHits(), "1"); assertHitCount(response, 1L); }); - assertResponse(prepareSearch("test").setQuery(queryStringQuery("127.0.0.2 \"2015/09/02\"")), response -> { - assertHits(response.getHits(), "1", "2"); - assertHitCount(response, 2L); - }); - assertResponse(prepareSearch("test").setQuery(queryStringQuery("127.0.0.1 OR 1.8")), response -> { - assertHits(response.getHits(), "1", "2"); - assertHitCount(response, 2L); - }); } public void testDocWithAllTypes() throws Exception { @@ -118,23 +107,23 @@ public void testDocWithAllTypes() throws Exception { reqs.add(prepareIndex("test").setId("1").setSource(docBody, XContentType.JSON)); indexRandom(true, false, reqs); - assertResponse(prepareSearch("test").setQuery(queryStringQuery("foo")), response -> assertHits(response.getHits(), "1")); - assertResponse(prepareSearch("test").setQuery(queryStringQuery("Bar")), response -> assertHits(response.getHits(), "1")); - assertResponse(prepareSearch("test").setQuery(queryStringQuery("Baz")), response -> assertHits(response.getHits(), "1")); - assertResponse(prepareSearch("test").setQuery(queryStringQuery("19")), response -> assertHits(response.getHits(), "1")); - // nested doesn't match because it's hidden - assertResponse(prepareSearch("test").setQuery(queryStringQuery("1476383971")), response -> assertHits(response.getHits(), "1")); - // bool doesn't match - assertResponse(prepareSearch("test").setQuery(queryStringQuery("7")), response -> assertHits(response.getHits(), "1")); - assertResponse(prepareSearch("test").setQuery(queryStringQuery("23")), response -> assertHits(response.getHits(), "1")); - assertResponse(prepareSearch("test").setQuery(queryStringQuery("1293")), response -> assertHits(response.getHits(), "1")); - assertResponse(prepareSearch("test").setQuery(queryStringQuery("42")), response -> assertHits(response.getHits(), "1")); - assertResponse(prepareSearch("test").setQuery(queryStringQuery("1.7")), response -> assertHits(response.getHits(), "1")); - assertResponse(prepareSearch("test").setQuery(queryStringQuery("1.5")), response -> assertHits(response.getHits(), "1")); - assertResponse(prepareSearch("test").setQuery(queryStringQuery("127.0.0.1")), response -> assertHits(response.getHits(), "1")); - // binary doesn't match - // suggest doesn't match - // geo_point doesn't match + assertResponses( + 
response -> assertHits(response.getHits(), "1"), + prepareSearch("test").setQuery(queryStringQuery("foo")), + prepareSearch("test").setQuery(queryStringQuery("Bar")), + prepareSearch("test").setQuery(queryStringQuery("Baz")), + prepareSearch("test").setQuery(queryStringQuery("19")), + // nested doesn't match because it's hidden + prepareSearch("test").setQuery(queryStringQuery("1476383971")), + // bool doesn't match + prepareSearch("test").setQuery(queryStringQuery("7")), + prepareSearch("test").setQuery(queryStringQuery("23")), + prepareSearch("test").setQuery(queryStringQuery("1293")), + prepareSearch("test").setQuery(queryStringQuery("42")), + prepareSearch("test").setQuery(queryStringQuery("1.7")), + prepareSearch("test").setQuery(queryStringQuery("1.5")), + prepareSearch("test").setQuery(queryStringQuery("127.0.0.1")) + ); } public void testKeywordWithWhitespace() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java index cffba49d5941c..f790cf30e1c0e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -108,6 +108,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; @@ -216,21 +217,14 @@ public void testConstantScoreQuery() throws Exception { assertThat(searchHit, hasScore(1.0f)); } }); - assertResponse( + assertResponses(response -> { + assertHitCount(response, 2L); + assertFirstHit(response, hasScore(response.getHits().getAt(1).getScore())); + }, + prepareSearch("test").setQuery(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + random().nextFloat())), prepareSearch("test").setQuery( boolQuery().must(matchAllQuery()).must(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + random().nextFloat())) - ), - response -> { - assertHitCount(response, 2L); - assertFirstHit(response, hasScore(response.getHits().getAt(1).getScore())); - } - ); - assertResponse( - prepareSearch("test").setQuery(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + random().nextFloat())), - response -> { - assertHitCount(response, 2L); - assertFirstHit(response, hasScore(response.getHits().getAt(1).getScore())); - } + ) ); assertResponse( prepareSearch("test").setQuery( @@ -595,19 +589,19 @@ public void testMultiMatchQuery() throws Exception { indicesAdmin().prepareRefresh("test").get(); builder = multiMatchQuery("value1", "field1", "field2").operator(Operator.AND); // Operator only applies on terms inside a field! - // Fields are always OR-ed together. + // Fields are always OR-ed together. 
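    // Worth spelling out the comment just above: in a multi_match query the operator
    // applies to the terms within each field, while the fields themselves always
    // combine with OR. A document therefore matches when any single field satisfies
    // the operator over all terms, as the context line below asserts for doc "1".
    assertSearchHitsWithoutFailures(
        prepareSearch().setQuery(multiMatchQuery("value1", "field1", "field2").operator(Operator.AND)),
        "1"
    );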
assertSearchHitsWithoutFailures(prepareSearch().setQuery(builder), "1"); refresh(); builder = multiMatchQuery("value1", "field1").field("field3", 1.5f).operator(Operator.AND); // Operator only applies on terms inside - // a field! Fields are always OR-ed - // together. + // a field! Fields are always OR-ed + // together. assertSearchHitsWithoutFailures(prepareSearch().setQuery(builder), "3", "1"); indicesAdmin().prepareRefresh("test").get(); builder = multiMatchQuery("value1").field("field1").field("field3", 1.5f).operator(Operator.AND); // Operator only applies on terms - // inside a field! Fields are - // always OR-ed together. + // inside a field! Fields are + // always OR-ed together. assertResponse(prepareSearch().setQuery(builder), response -> { assertHitCount(response, 2L); assertSearchHits(response, "3", "1"); @@ -732,25 +726,27 @@ public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws prepareIndex("test").setId("2").setSource("field2", "value1").get(); refresh(); - BoolQueryBuilder boolQuery = boolQuery().must(termQuery("field1", "value1")) - .should(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(3)); - assertResponse(prepareSearch().setQuery(boolQuery), response -> { + assertResponses(response -> { assertHitCount(response, 1L); assertFirstHit(response, hasId("1")); - }); - boolQuery = boolQuery().must(termQuery("field1", "value1")) + }, + prepareSearch().setQuery( + boolQuery().must(termQuery("field1", "value1")) + .should(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(3)) + ), + prepareSearch().setQuery( + boolQuery().should(termQuery("field1", "value1")) + .should(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(3)) + .minimumShouldMatch(1) + ) + ); + + BoolQueryBuilder boolQuery = boolQuery().must(termQuery("field1", "value1")) .should(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(1)) // Only one should clause is defined, returns no docs. 
.minimumShouldMatch(2); assertHitCount(prepareSearch().setQuery(boolQuery), 0L); - boolQuery = boolQuery().should(termQuery("field1", "value1")) - .should(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(3)) - .minimumShouldMatch(1); - assertResponse(prepareSearch().setQuery(boolQuery), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); boolQuery = boolQuery().must(termQuery("field1", "value1")) .must(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(3)); assertHitCount(prepareSearch().setQuery(boolQuery), 0L); @@ -800,20 +796,18 @@ public void testSpecialRangeSyntaxInQueryString() { prepareIndex("test").setId("2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get(); refresh(); - assertResponse(prepareSearch().setQuery(queryStringQuery("num:>19")), response -> { + assertResponses(response -> { assertHitCount(response, 1L); assertFirstHit(response, hasId("2")); - }); - assertHitCount(prepareSearch().setQuery(queryStringQuery("num:>20")), 0L); + }, prepareSearch().setQuery(queryStringQuery("num:>19")), prepareSearch().setQuery(queryStringQuery("num:>=20"))); - assertResponse(prepareSearch().setQuery(queryStringQuery("num:>=20")), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("2")); - }); - assertHitCount(prepareSearch().setQuery(queryStringQuery("num:>11")), 2L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("num:<20")), 1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("num:<=20")), 2L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("+num:>11 +num:<20")), 1L); + assertHitCount(prepareSearch().setQuery(queryStringQuery("num:>20")), 0L); + assertHitCount(2L, prepareSearch().setQuery(queryStringQuery("num:>11")), prepareSearch().setQuery(queryStringQuery("num:<=20"))); + assertHitCount( + 1L, + prepareSearch().setQuery(queryStringQuery("num:<20")), + prepareSearch().setQuery(queryStringQuery("+num:>11 +num:<20")) + ); } public void testEmptytermsQuery() throws Exception { @@ -826,8 +820,11 @@ public void testEmptytermsQuery() throws Exception { prepareIndex("test").setId("3").setSource("term", "3"), prepareIndex("test").setId("4").setSource("term", "4") ); - assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("term", new String[0]))), 0L); - assertHitCount(prepareSearch("test").setQuery(idsQuery()), 0L); + assertHitCount( + 0L, + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("term", new String[0]))), + prepareSearch("test").setQuery(idsQuery()) + ); } public void testTermsQuery() throws Exception { @@ -866,9 +863,12 @@ public void testTermsQuery() throws Exception { assertSearchHitsWithoutFailures(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("dbl", new double[] { 2, 5 }))), "2"); assertSearchHitsWithoutFailures(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("lng", new long[] { 2, 5 }))), "2"); // test valid type, but no matching terms - assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("str", "5", "6"))), 0L); - assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("dbl", new double[] { 5, 6 }))), 0L); - assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("lng", new long[] { 5, 6 }))), 0L); + assertHitCount( + 0L, + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("str", "5", "6"))), + 
prepareSearch("test").setQuery(constantScoreQuery(termsQuery("dbl", new double[] { 5, 6 }))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("lng", new long[] { 5, 6 }))) + ); } public void testTermsLookupFilter() throws Exception { @@ -1064,106 +1064,35 @@ public void testNumericTermsAndRanges() throws Exception { .get(); refresh(); - logger.info("--> term query on 1"); - assertResponse(prepareSearch("test").setQuery(termQuery("num_byte", 1)), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termQuery("num_short", 1)), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termQuery("num_integer", 1)), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termQuery("num_long", 1)), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termQuery("num_float", 1)), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termQuery("num_double", 1)), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - logger.info("--> terms query on 1"); - assertResponse(prepareSearch("test").setQuery(termsQuery("num_byte", new int[] { 1 })), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termsQuery("num_short", new int[] { 1 })), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termsQuery("num_integer", new int[] { 1 })), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termsQuery("num_long", new int[] { 1 })), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termsQuery("num_float", new double[] { 1 })), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(termsQuery("num_double", new double[] { 1 })), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - logger.info("--> term filter on 1"); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_byte", 1))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_short", 1))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_integer", 1))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_long", 1))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_float", 1))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - 
assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_double", 1))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - logger.info("--> terms filter on 1"); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_byte", new int[] { 1 }))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_short", new int[] { 1 }))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_integer", new int[] { 1 }))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_long", new int[] { 1 }))), response -> { + assertResponses(response -> { assertHitCount(response, 1L); assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_float", new int[] { 1 }))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); - assertResponse(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_double", new int[] { 1 }))), response -> { - assertHitCount(response, 1L); - assertFirstHit(response, hasId("1")); - }); + }, + prepareSearch("test").setQuery(termQuery("num_byte", 1)), + prepareSearch("test").setQuery(termQuery("num_short", 1)), + prepareSearch("test").setQuery(termQuery("num_integer", 1)), + prepareSearch("test").setQuery(termQuery("num_long", 1)), + prepareSearch("test").setQuery(termQuery("num_float", 1)), + prepareSearch("test").setQuery(termQuery("num_double", 1)), + prepareSearch("test").setQuery(termsQuery("num_byte", new int[] { 1 })), + prepareSearch("test").setQuery(termsQuery("num_short", new int[] { 1 })), + prepareSearch("test").setQuery(termsQuery("num_integer", new int[] { 1 })), + prepareSearch("test").setQuery(termsQuery("num_long", new int[] { 1 })), + prepareSearch("test").setQuery(termsQuery("num_float", new double[] { 1 })), + prepareSearch("test").setQuery(termsQuery("num_double", new double[] { 1 })), + prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_byte", 1))), + prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_short", 1))), + prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_integer", 1))), + prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_long", 1))), + prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_float", 1))), + prepareSearch("test").setQuery(constantScoreQuery(termQuery("num_double", 1))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_byte", new int[] { 1 }))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_short", new int[] { 1 }))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_integer", new int[] { 1 }))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_long", new int[] { 1 }))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_float", new int[] { 1 }))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("num_double", new int[] { 1 }))) + ); } public void testNumericRangeFilter_2826() throws Exception { @@ -1301,16 +1230,19 @@ public void testSpanMultiTermQuery() throws IOException { 
prepareIndex("test").setId("4").setSource("description", "fop", "count", 4).get(); refresh(); - assertHitCount(prepareSearch("test").setQuery(spanOrQuery(spanMultiTermQueryBuilder(fuzzyQuery("description", "fop")))), 4); - assertHitCount(prepareSearch("test").setQuery(spanOrQuery(spanMultiTermQueryBuilder(prefixQuery("description", "fo")))), 4); - assertHitCount(prepareSearch("test").setQuery(spanOrQuery(spanMultiTermQueryBuilder(wildcardQuery("description", "oth*")))), 3); assertHitCount( + 4, + prepareSearch("test").setQuery(spanOrQuery(spanMultiTermQueryBuilder(fuzzyQuery("description", "fop")))), + prepareSearch("test").setQuery(spanOrQuery(spanMultiTermQueryBuilder(prefixQuery("description", "fo")))) + ); + assertHitCount( + 3, + prepareSearch("test").setQuery(spanOrQuery(spanMultiTermQueryBuilder(wildcardQuery("description", "oth*")))), prepareSearch("test").setQuery( spanOrQuery(spanMultiTermQueryBuilder(QueryBuilders.rangeQuery("description").from("ffa").to("foo"))) ), - 3 + prepareSearch("test").setQuery(spanOrQuery(spanMultiTermQueryBuilder(regexpQuery("description", "fo{2}")))) ); - assertHitCount(prepareSearch("test").setQuery(spanOrQuery(spanMultiTermQueryBuilder(regexpQuery("description", "fo{2}")))), 3); } public void testSpanNot() throws IOException, ExecutionException, InterruptedException { @@ -1321,6 +1253,7 @@ public void testSpanNot() throws IOException, ExecutionException, InterruptedExc refresh(); assertHitCount( + 1L, prepareSearch("test").setQuery( spanNotQuery( spanNearQuery(QueryBuilders.spanTermQuery("description", "quick"), 1).addClause( @@ -1329,9 +1262,6 @@ public void testSpanNot() throws IOException, ExecutionException, InterruptedExc spanTermQuery("description", "brown") ) ), - 1L - ); - assertHitCount( prepareSearch("test").setQuery( spanNotQuery( spanNearQuery(QueryBuilders.spanTermQuery("description", "quick"), 1).addClause( @@ -1340,9 +1270,6 @@ public void testSpanNot() throws IOException, ExecutionException, InterruptedExc spanTermQuery("description", "sleeping") ).dist(5) ), - 1L - ); - assertHitCount( prepareSearch("test").setQuery( spanNotQuery( spanNearQuery(QueryBuilders.spanTermQuery("description", "quick"), 1).addClause( @@ -1350,8 +1277,7 @@ public void testSpanNot() throws IOException, ExecutionException, InterruptedExc ), spanTermQuery("description", "jumped") ).pre(1).post(1) - ), - 1L + ) ); } @@ -1423,22 +1349,19 @@ public void testSimpleDFSQuery() throws IOException { public void testMultiFieldQueryString() { prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); - - logger.info("regular"); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("value1").field("field1").field("field2")), 1); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("field\\*:value1")), 1); - logger.info("prefix"); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("value*").field("field1").field("field2")), 1); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("field\\*:value*")), 1); - logger.info("wildcard"); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("v?lue*").field("field1").field("field2")), 1); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("field\\*:v?lue*")), 1); - logger.info("fuzzy"); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("value~").field("field1").field("field2")), 1); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("field\\*:value~")), 1); - 
logger.info("regexp"); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("/value[01]/").field("field1").field("field2")), 1); - assertHitCount(prepareSearch("test").setQuery(queryStringQuery("field\\*:/value[01]/")), 1); + assertHitCount( + 1, + prepareSearch("test").setQuery(queryStringQuery("value1").field("field1").field("field2")), + prepareSearch("test").setQuery(queryStringQuery("field\\*:value1")), + prepareSearch("test").setQuery(queryStringQuery("value*").field("field1").field("field2")), + prepareSearch("test").setQuery(queryStringQuery("field\\*:value*")), + prepareSearch("test").setQuery(queryStringQuery("v?lue*").field("field1").field("field2")), + prepareSearch("test").setQuery(queryStringQuery("field\\*:v?lue*")), + prepareSearch("test").setQuery(queryStringQuery("value~").field("field1").field("field2")), + prepareSearch("test").setQuery(queryStringQuery("field\\*:value~")), + prepareSearch("test").setQuery(queryStringQuery("/value[01]/").field("field1").field("field2")), + prepareSearch("test").setQuery(queryStringQuery("field\\*:/value[01]/")) + ); } // see #3797 @@ -1448,9 +1371,12 @@ public void testMultiMatchLenientIssue3797() { prepareIndex("test").setId("1").setSource("field1", 123, "field2", "value2").get(); refresh(); - assertHitCount(prepareSearch("test").setQuery(multiMatchQuery("value2", "field2").field("field1", 2).lenient(true)), 1L); - assertHitCount(prepareSearch("test").setQuery(multiMatchQuery("value2", "field2").field("field1", 2).lenient(true)), 1L); - assertHitCount(prepareSearch("test").setQuery(multiMatchQuery("value2").field("field2", 2).lenient(true)), 1L); + assertHitCount( + 1L, + prepareSearch("test").setQuery(multiMatchQuery("value2", "field2").field("field1", 2).lenient(true)), + prepareSearch("test").setQuery(multiMatchQuery("value2", "field2").field("field1", 2).lenient(true)), + prepareSearch("test").setQuery(multiMatchQuery("value2").field("field2", 2).lenient(true)) + ); } public void testMinScore() throws ExecutionException, InterruptedException { @@ -1483,24 +1409,15 @@ public void testQueryStringWithSlopAndFields() { assertHitCount(prepareSearch("test").setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")), 2); assertHitCount( + 1, prepareSearch("test").setPostFilter(QueryBuilders.termQuery("type", "customer")) .setQuery(QueryBuilders.queryStringQuery("\"one two\"").field("desc")), - 1 - ); - assertHitCount( prepareSearch("test").setPostFilter(QueryBuilders.termQuery("type", "product")) .setQuery(QueryBuilders.queryStringQuery("\"one three\"~5").field("desc")), - 1 - ); - assertHitCount( prepareSearch("test").setPostFilter(QueryBuilders.termQuery("type", "customer")) .setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")), - 1 - ); - assertHitCount( prepareSearch("test").setPostFilter(QueryBuilders.termQuery("type", "customer")) - .setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")), - 1 + .setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")) ); } @@ -1534,91 +1451,51 @@ public void testRangeQueryWithTimeZone() throws Exception { .setSource("date", Instant.now().atZone(ZoneOffset.ofHours(1)).toInstant().toEpochMilli(), "num", 4) ); - assertResponse( + assertResponses(response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("1")); + }, prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00").to("2014-01-01T00:59:00")), - response -> { - 
assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), is("1")); - } - ); - assertResponse( - prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00").to("2013-12-31T23:59:00")), - response -> { - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), is("2")); - } - ); - assertResponse( - prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00").to("2014-01-01T01:59:00")), - response -> { - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), is("3")); - } - ); - // We explicitly define a time zone in the from/to dates so whatever the time zone is, it won't be used - assertResponse( + // We explicitly define a time zone in the from/to dates so whatever the time zone is, it won't be used prepareSearch("test").setQuery( QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00Z").to("2014-01-01T00:59:00Z").timeZone("+10:00") ), - response -> { - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), is("1")); - } - ); - assertResponse( - prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00Z").to("2013-12-31T23:59:00Z").timeZone("+10:00") - ), - response -> { - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), is("2")); - } - ); - assertResponse( + // We define a time zone to be applied to the filter and from/to have no time zone prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00Z").to("2014-01-01T01:59:00Z").timeZone("+10:00") - ), - response -> { - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), is("3")); - } + QueryBuilders.rangeQuery("date").from("2014-01-01T03:00:00").to("2014-01-01T03:59:00").timeZone("+03:00") + ) ); - // We define a time zone to be applied to the filter and from/to have no time zone - assertResponse( + assertResponses(response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("2")); + }, + prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00").to("2013-12-31T23:59:00")), prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01T03:00:00").to("2014-01-01T03:59:00").timeZone("+03:00") + QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00Z").to("2013-12-31T23:59:00Z").timeZone("+10:00") ), - response -> { - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), is("1")); - } - ); - assertResponse( prepareSearch("test").setQuery( QueryBuilders.rangeQuery("date").from("2014-01-01T02:00:00").to("2014-01-01T02:59:00").timeZone("+03:00") - ), - response -> { - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), is("2")); - } + ) ); - assertResponse( + assertResponses(response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("3")); + }, + prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00").to("2014-01-01T01:59:00")), prepareSearch("test").setQuery( - QueryBuilders.rangeQuery("date").from("2014-01-01T04:00:00").to("2014-01-01T04:59:00").timeZone("+03:00") - ), - response -> { - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), is("3")); - } + QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00Z").to("2014-01-01T01:59:00Z").timeZone("+10:00") + ) ); - assertResponse( + 
assertResponses(response -> { + assertHitCount(response, 1L); + assertThat(response.getHits().getAt(0).getId(), is("3")); + }, prepareSearch("test").setQuery( QueryBuilders.rangeQuery("date").from("2014-01-01").to("2014-01-01T00:59:00").timeZone("-01:00") ), - response -> { - assertHitCount(response, 1L); - assertThat(response.getHits().getAt(0).getId(), is("3")); - } + prepareSearch("test").setQuery( + QueryBuilders.rangeQuery("date").from("2014-01-01T04:00:00").to("2014-01-01T04:59:00").timeZone("+03:00") + ) ); assertResponse(prepareSearch("test").setQuery(QueryBuilders.rangeQuery("date").from("now/d-1d").timeZone("+01:00")), response -> { assertHitCount(response, 1L); @@ -1805,8 +1682,8 @@ public void testFieldAliasesForMetaFields() throws Exception { } /** - * Test that wildcard queries on keyword fields get normalized - */ + * Test that wildcard queries on keyword fields get normalized + */ public void testWildcardQueryNormalizationOnKeywordField() { assertAcked( prepareCreate("test").setSettings( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java index 522c20b687caa..f9ae30720b33f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java @@ -51,6 +51,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; @@ -383,14 +384,10 @@ public void testBasicAllQuery() throws Exception { reqs.add(prepareIndex("test").setId("3").setSource("f3", "foo bar baz")); indexRandom(true, false, reqs); - assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo")), response -> { + assertResponses(response -> { assertHitCount(response, 2L); assertHits(response.getHits(), "1", "3"); - }); - assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("bar")), response -> { - assertHitCount(response, 2L); - assertHits(response.getHits(), "1", "3"); - }); + }, prepareSearch("test").setQuery(simpleQueryStringQuery("foo")), prepareSearch("test").setQuery(simpleQueryStringQuery("bar"))); assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")), response -> { assertHitCount(response, 3L); assertHits(response.getHits(), "1", "2", "3"); @@ -407,22 +404,18 @@ public void testWithDate() throws Exception { reqs.add(prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01")); indexRandom(true, false, reqs); - assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")), response -> { + assertResponses(response -> { assertHits(response.getHits(), "1", "2"); assertHitCount(response, 2L); - }); + }, + prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")), + prepareSearch("test").setQuery(simpleQueryStringQuery("bar \"2015/09/02\"")), + 
prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\" \"2015/09/01\"")) + ); assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\"")), response -> { assertHits(response.getHits(), "1"); assertHitCount(response, 1L); }); - assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("bar \"2015/09/02\"")), response -> { - assertHits(response.getHits(), "1", "2"); - assertHitCount(response, 2L); - }); - assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\" \"2015/09/01\"")), response -> { - assertHits(response.getHits(), "1", "2"); - assertHitCount(response, 2L); - }); } public void testWithLotsOfTypes() throws Exception { @@ -435,22 +428,18 @@ public void testWithLotsOfTypes() throws Exception { reqs.add(prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01", "f_float", "1.8", "f_ip", "127.0.0.2")); indexRandom(true, false, reqs); - assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")), response -> { + assertResponses(response -> { assertHits(response.getHits(), "1", "2"); assertHitCount(response, 2L); - }); + }, + prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")), + prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.2 \"2015/09/02\"")), + prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.1 1.8")) + ); assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("\"2015/09/02\"")), response -> { assertHits(response.getHits(), "1"); assertHitCount(response, 1L); }); - assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.2 \"2015/09/02\"")), response -> { - assertHits(response.getHits(), "1", "2"); - assertHitCount(response, 2L); - }); - assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.1 1.8")), response -> { - assertHits(response.getHits(), "1", "2"); - assertHitCount(response, 2L); - }); } public void testDocWithAllTypes() throws Exception { @@ -463,34 +452,27 @@ public void testDocWithAllTypes() throws Exception { reqs.add(prepareIndex("test").setId("1").setSource(docBody, XContentType.JSON)); indexRandom(true, false, reqs); - assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("foo")), response -> assertHits(response.getHits(), "1")); - assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")), response -> assertHits(response.getHits(), "1")); - assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("Baz")), response -> assertHits(response.getHits(), "1")); - assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("19")), response -> assertHits(response.getHits(), "1")); - // nested doesn't match because it's hidden - assertResponse( + assertResponses( + response -> assertHits(response.getHits(), "1"), + prepareSearch("test").setQuery(simpleQueryStringQuery("foo")), + prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")), + prepareSearch("test").setQuery(simpleQueryStringQuery("Baz")), + prepareSearch("test").setQuery(simpleQueryStringQuery("19")), + // nested doesn't match because it's hidden prepareSearch("test").setQuery(simpleQueryStringQuery("1476383971")), - response -> assertHits(response.getHits(), "1") - ); - // bool doesn't match - assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("7")), response -> assertHits(response.getHits(), "1")); - assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("23")), response -> assertHits(response.getHits(), 
"1")); - assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("1293")), response -> assertHits(response.getHits(), "1")); - assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("42")), response -> assertHits(response.getHits(), "1")); - assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("1.7")), response -> assertHits(response.getHits(), "1")); - assertResponse(prepareSearch("test").setQuery(simpleQueryStringQuery("1.5")), response -> assertHits(response.getHits(), "1")); - assertResponse( + // bool doesn't match + prepareSearch("test").setQuery(simpleQueryStringQuery("7")), + prepareSearch("test").setQuery(simpleQueryStringQuery("23")), + prepareSearch("test").setQuery(simpleQueryStringQuery("1293")), + prepareSearch("test").setQuery(simpleQueryStringQuery("42")), + prepareSearch("test").setQuery(simpleQueryStringQuery("1.7")), + prepareSearch("test").setQuery(simpleQueryStringQuery("1.5")), prepareSearch("test").setQuery(simpleQueryStringQuery("127.0.0.1")), - response -> assertHits(response.getHits(), "1") - ); - // binary doesn't match - // suggest doesn't match - // geo_point doesn't match - // geo_shape doesn't match - - assertResponse( - prepareSearch("test").setQuery(simpleQueryStringQuery("foo Bar 19 127.0.0.1").defaultOperator(Operator.AND)), - response -> assertHits(response.getHits(), "1") + // binary doesn't match + // suggest doesn't match + // geo_point doesn't match + // geo_shape doesn't match + prepareSearch("test").setQuery(simpleQueryStringQuery("foo Bar 19 127.0.0.1").defaultOperator(Operator.AND)) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java index 9a7ce2c5c28ab..c59c4a045a36b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java @@ -33,6 +33,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -68,25 +69,20 @@ public void testStopOneNodePreferenceWithRedState() throws IOException { "_prefer_nodes:somenode,server2" }; for (String pref : preferences) { logger.info("--> Testing out preference={}", pref); - assertResponse(prepareSearch().setSize(0).setPreference(pref), response -> { + assertResponses(response -> { assertThat(RestStatus.OK, equalTo(response.status())); assertThat(pref, response.getFailedShards(), greaterThanOrEqualTo(0)); - }); - assertResponse(prepareSearch().setPreference(pref), response -> { - assertThat(RestStatus.OK, equalTo(response.status())); - assertThat(pref, response.getFailedShards(), greaterThanOrEqualTo(0)); - }); + }, prepareSearch().setSize(0).setPreference(pref), prepareSearch().setPreference(pref)); } // _only_local is a stricter preference, we need to send the request to a data node - assertResponse(dataNodeClient().prepareSearch().setSize(0).setPreference("_only_local"), response -> { + assertResponses(response -> { assertThat(RestStatus.OK, 
equalTo(response.status())); assertThat("_only_local", response.getFailedShards(), greaterThanOrEqualTo(0)); - }); - assertResponse(dataNodeClient().prepareSearch().setPreference("_only_local"), response -> { - assertThat(RestStatus.OK, equalTo(response.status())); - assertThat("_only_local", response.getFailedShards(), greaterThanOrEqualTo(0)); - }); + }, + dataNodeClient().prepareSearch().setSize(0).setPreference("_only_local"), + dataNodeClient().prepareSearch().setPreference("_only_local") + ); } public void testNoPreferenceRandom() { @@ -121,19 +117,11 @@ public void testSimplePreference() { prepareIndex("test").setSource("field1", "value1").get(); refresh(); - assertResponse( + assertResponses( + response -> assertThat(response.getHits().getTotalHits().value(), equalTo(1L)), prepareSearch().setQuery(matchAllQuery()), - response -> assertThat(response.getHits().getTotalHits().value(), equalTo(1L)) - ); - - assertResponse( prepareSearch().setQuery(matchAllQuery()).setPreference("_local"), - response -> assertThat(response.getHits().getTotalHits().value(), equalTo(1L)) - ); - - assertResponse( - prepareSearch().setQuery(matchAllQuery()).setPreference("1234"), - response -> assertThat(response.getHits().getTotalHits().value(), equalTo(1L)) + prepareSearch().setQuery(matchAllQuery()).setPreference("1234") ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java index 06ce330213af8..789da5aac7568 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java @@ -24,6 +24,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -50,18 +51,14 @@ public void testNodeSelection() { // Before we've gathered stats for all nodes, we should try each node once. Set<String> nodeIds = new HashSet<>(); - assertResponse(client.prepareSearch().setQuery(matchAllQuery()), response -> { - assertThat(response.getHits().getTotalHits().value(), equalTo(1L)); - nodeIds.add(response.getHits().getAt(0).getShard().getNodeId()); - }); - assertResponse(client.prepareSearch().setQuery(matchAllQuery()), response -> { + assertResponses(response -> { assertThat(response.getHits().getTotalHits().value(), equalTo(1L)); nodeIds.add(response.getHits().getAt(0).getShard().getNodeId()); - }); - assertResponse(client.prepareSearch().setQuery(matchAllQuery()), response -> { - assertThat(response.getHits().getTotalHits().value(), equalTo(1L)); - nodeIds.add(response.getHits().getAt(0).getShard().getNodeId()); - }); + }, + client.prepareSearch().setQuery(matchAllQuery()), + client.prepareSearch().setQuery(matchAllQuery()), + client.prepareSearch().setQuery(matchAllQuery()) + ); assertEquals(3, nodeIds.size()); // Now after more searches, we should select a node with the lowest ARS rank.
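Note on the pattern applied throughout these test diffs: the consolidation relies on varargs overloads of assertResponses and assertHitCount in org.elasticsearch.test.hamcrest.ElasticsearchAssertions, which run one assertion body against several search requests. A minimal sketch of the shape these call sites imply, with signatures inferred from usage here rather than copied from the upstream helpers (which also handle response ref-counting and failure reporting):

    import java.util.function.Consumer;
    import org.elasticsearch.action.search.SearchRequestBuilder;
    import org.elasticsearch.action.search.SearchResponse;

    // Run the same assertions against every request; assumes the existing
    // single-request assertResponse(SearchRequestBuilder, Consumer<SearchResponse>).
    public static void assertResponses(Consumer<SearchResponse> consumer, SearchRequestBuilder... requests) {
        for (SearchRequestBuilder request : requests) {
            assertResponse(request, consumer);
        }
    }

    // Varargs overload taking the expected count first, so repeated
    // one-request assertHitCount calls collapse into a single call.
    public static void assertHitCount(long expectedHitCount, SearchRequestBuilder... requests) {
        assertResponses(response -> assertHitCount(response, expectedHitCount), requests);
    }

With these helpers, runs of near-identical assertResponse blocks collapse into one call per assertion body, which is the refactoring applied in the hunks above and below.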
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java index d1841ebaf8071..87665c3d784f1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java @@ -54,6 +54,7 @@ import java.util.Set; import java.util.TreeMap; import java.util.concurrent.ExecutionException; +import java.util.function.Consumer; import java.util.function.Function; import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; @@ -66,6 +67,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @@ -323,6 +325,12 @@ public void testRandomSorting() throws IOException, InterruptedException, Execut } public void test3078() { + Consumer<SearchResponse> assertConsumer = response -> { + assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); + assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); + assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); + }; + assertAcked(indicesAdmin().prepareCreate("test").setMapping("field", "type=keyword").get()); ensureGreen(); @@ -332,11 +340,7 @@ public void test3078() { refresh(); assertResponse( prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), - response -> { - assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); - } + assertConsumer ); // reindex and refresh prepareIndex("test").setId(Integer.toString(1)).setSource("field", Integer.toString(1)).get(); @@ -344,22 +348,14 @@ public void test3078() { assertResponse( prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), - response -> { - assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); - } + assertConsumer ); // reindex - no refresh prepareIndex("test").setId(Integer.toString(1)).setSource("field", Integer.toString(1)).get(); assertResponse( prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), - response -> { - assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); - } + assertConsumer ); // force merge forceMerge(); @@ -368,20 +364,12 @@
prepareIndex("test").setId(Integer.toString(1)).setSource("field", Integer.toString(1)).get(); assertResponse( prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), - response -> { - assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); - } + assertConsumer ); refresh(); assertResponse( prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)), - response -> { - assertThat(response.getHits().getAt(0).getSortValues()[0].toString(), equalTo("1")); - assertThat(response.getHits().getAt(1).getSortValues()[0].toString(), equalTo("10")); - assertThat(response.getHits().getAt(2).getSortValues()[0].toString(), equalTo("100")); - } + assertConsumer ); } @@ -395,39 +383,19 @@ public void testScoreSortDirection() throws Exception { refresh(); - assertResponse( + assertResponses(response -> { + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getScore(), Matchers.lessThan(response.getHits().getAt(0).getScore())); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(2).getScore(), Matchers.lessThan(response.getHits().getAt(1).getScore())); + assertThat(response.getHits().getAt(2).getId(), equalTo("3")); + }, prepareSearch("test").setQuery( QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field")) ), - response -> { - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(1).getScore(), Matchers.lessThan(response.getHits().getAt(0).getScore())); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(2).getScore(), Matchers.lessThan(response.getHits().getAt(1).getScore())); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - } - ); - assertResponse( - prepareSearch("test").setQuery( - QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field")) - ).addSort("_score", SortOrder.DESC), - response -> { - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(1).getScore(), Matchers.lessThan(response.getHits().getAt(0).getScore())); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(2).getScore(), Matchers.lessThan(response.getHits().getAt(1).getScore())); - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - } - ); - assertResponse( prepareSearch("test").setQuery( QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field")) - ).addSort("_score", SortOrder.DESC), - response -> { - assertThat(response.getHits().getAt(2).getId(), equalTo("3")); - assertThat(response.getHits().getAt(1).getId(), equalTo("2")); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - } + ).addSort("_score", SortOrder.DESC) ); } @@ -878,30 +846,20 @@ public void testSortMissingStrings() throws IOException { throw new RuntimeException(); } - logger.info("--> sort with no missing (same as missing _last)"); - assertResponse( - prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC)), - response -> { - 
assertThat(Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0)); + assertResponses(response -> { + assertThat(Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0)); - assertThat(response.getHits().getTotalHits().value(), equalTo(3L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(1).getId(), equalTo("3")); - assertThat(response.getHits().getAt(2).getId(), equalTo("2")); - } + assertThat(response.getHits().getTotalHits().value(), equalTo(3L)); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("3")); + assertThat(response.getHits().getAt(2).getId(), equalTo("2")); + }, + // "--> sort with no missing (same as missing _last)" + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC)), + // "--> sort with missing _last" + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_last")) ); - logger.info("--> sort with missing _last"); - assertResponse( - prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_last")), - response -> { - assertThat(Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0)); - assertThat(response.getHits().getTotalHits().value(), equalTo(3L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - assertThat(response.getHits().getAt(1).getId(), equalTo("3")); - assertThat(response.getHits().getAt(2).getId(), equalTo("2")); - } - ); logger.info("--> sort with missing _first"); assertResponse( prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_first")), @@ -1263,59 +1221,7 @@ public void testSortMVField() throws Exception { assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).longValue(), equalTo(2L)); } ); - assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("int_values", SortOrder.ASC), response -> { - assertThat(response.getHits().getTotalHits().value(), equalTo(3L)); - assertThat(response.getHits().getHits().length, equalTo(3)); - - assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4)); - - assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1)); - - assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7)); - }); - assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("int_values", SortOrder.DESC), response -> { - assertThat(response.getHits().getTotalHits().value(), equalTo(3L)); - assertThat(response.getHits().getHits().length, equalTo(3)); - - assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20)); - - assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10)); - - assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) 
response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3)); - }); - assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("short_values", SortOrder.ASC), response -> { - assertThat(response.getHits().getTotalHits().value(), equalTo(3L)); - assertThat(response.getHits().getHits().length, equalTo(3)); - - assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(-4)); - - assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(1)); - - assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7)); - }); - assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("short_values", SortOrder.DESC), response -> { - assertThat(response.getHits().getTotalHits().value(), equalTo(3L)); - assertThat(response.getHits().getHits().length, equalTo(3)); - - assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(((Number) response.getHits().getAt(0).getSortValues()[0]).intValue(), equalTo(20)); - - assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(((Number) response.getHits().getAt(1).getSortValues()[0]).intValue(), equalTo(10)); - - assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3)); - }); - assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("byte_values", SortOrder.ASC), response -> { + assertResponses(response -> { assertThat(response.getHits().getTotalHits().value(), equalTo(3L)); assertThat(response.getHits().getHits().length, equalTo(3)); @@ -1327,8 +1233,12 @@ public void testSortMVField() throws Exception { assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(7)); - }); - assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("byte_values", SortOrder.DESC), response -> { + }, + prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("int_values", SortOrder.ASC), + prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("short_values", SortOrder.ASC), + prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("byte_values", SortOrder.ASC) + ); + assertResponses(response -> { assertThat(response.getHits().getTotalHits().value(), equalTo(3L)); assertThat(response.getHits().getHits().length, equalTo(3)); @@ -1340,7 +1250,11 @@ public void testSortMVField() throws Exception { assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); assertThat(((Number) response.getHits().getAt(2).getSortValues()[0]).intValue(), equalTo(3)); - }); + }, + prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("int_values", SortOrder.DESC), + prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("short_values", SortOrder.DESC), + prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("byte_values", SortOrder.DESC) + ); assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("float_values", SortOrder.ASC), response -> { assertThat(response.getHits().getTotalHits().value(), equalTo(3L)); 
assertThat(response.getHits().getHits().length, equalTo(3)); @@ -1478,8 +1392,7 @@ public void testSortOnRareField() throws IOException { } refresh(); - assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC), response -> { - + Consumer<SearchResponse> assertResponse = response -> { assertThat(response.getHits().getHits().length, equalTo(3)); assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); @@ -1490,27 +1403,17 @@ assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo("10")); assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); assertThat(response.getHits().getAt(2).getSortValues()[0], equalTo("03")); - }); + }; + + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC), assertResponse); + for (int i = 0; i < 15; i++) { prepareIndex("test").setId(Integer.toString(300 + i)) .setSource(jsonBuilder().startObject().array("some_other_field", "foobar").endObject()) .get(); refresh(); } - - assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC), response -> { - - assertThat(response.getHits().getHits().length, equalTo(3)); - - assertThat(response.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); - assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo("20")); - - assertThat(response.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); - assertThat(response.getHits().getAt(1).getSortValues()[0], equalTo("10")); - - assertThat(response.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); - assertThat(response.getHits().getAt(2).getSortValues()[0], equalTo("03")); - }); + assertResponse(prepareSearch().setQuery(matchAllQuery()).setSize(3).addSort("string_values", SortOrder.DESC), assertResponse); } public void testSortMetaField() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java index 1383f33a41d84..aabca1b9333f8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java @@ -34,6 +34,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSortValues; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.closeTo; @@ -292,49 +293,22 @@ public void testSinglePointGeoDistanceSort() throws ExecutionException, Interrup String hashPoint = "s037ms06g7h0"; - GeoDistanceSortBuilder geoDistanceSortBuilder = new GeoDistanceSortBuilder(LOCATION_FIELD, hashPoint); - - assertResponse( -
prepareSearch().setQuery(matchAllQuery()).addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)), - response -> checkCorrectSortOrderForGeoSort(response) - ); - - geoDistanceSortBuilder = new GeoDistanceSortBuilder(LOCATION_FIELD, 2, 2); - - assertResponse( - prepareSearch().setQuery(matchAllQuery()).addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC)), - response -> checkCorrectSortOrderForGeoSort(response) - ); - - assertResponse( + assertResponses( + response -> checkCorrectSortOrderForGeoSort(response), + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, hashPoint).sortMode(SortMode.MIN).order(SortOrder.ASC)), + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, new GeoPoint(2, 2)).sortMode(SortMode.MIN).order(SortOrder.ASC)), + prepareSearch().setQuery(matchAllQuery()) + .addSort(new GeoDistanceSortBuilder(LOCATION_FIELD, 2, 2).sortMode(SortMode.MIN).order(SortOrder.ASC)), prepareSearch().setSource(new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0))), - response -> checkCorrectSortOrderForGeoSort(response) - ); - - assertResponse( prepareSearch().setSource(new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, "s037ms06g7h0"))), - response -> checkCorrectSortOrderForGeoSort(response) - ); - - assertResponse( prepareSearch().setSource(new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0))), - response -> checkCorrectSortOrderForGeoSort(response) - ); - - assertResponse( prepareSearch().setSource( new SearchSourceBuilder().sort( SortBuilders.geoDistanceSort(LOCATION_FIELD, 2.0, 2.0).validation(GeoValidationMethod.COERCE) ) - ), - response -> checkCorrectSortOrderForGeoSort(response) + ) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java index ec9c680e17fc3..9d53eb03eb04e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java @@ -22,12 +22,14 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; public class MetadataFetchingIT extends ESIntegTestCase { + public void testSimple() { assertAcked(prepareCreate("test")); ensureGreen(); @@ -35,17 +37,14 @@ public void testSimple() { prepareIndex("test").setId("1").setSource("field", "value").get(); refresh(); - assertResponse(prepareSearch("test").storedFields("_none_").setFetchSource(false).setVersion(true), response -> { + assertResponses(response -> { assertThat(response.getHits().getAt(0).getId(), nullValue()); - assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); assertThat(response.getHits().getAt(0).getVersion(), notNullValue()); - }); - - assertResponse(prepareSearch("test").storedFields("_none_"), response -> { - assertThat(response.getHits().getAt(0).getId(), nullValue()); - 
assertThat(response.getHits().getAt(0).getId(), nullValue()); assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); - }); + }, + prepareSearch("test").storedFields("_none_").setFetchSource(false).setVersion(true), + prepareSearch("test").storedFields("_none_") + ); } public void testInnerHits() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/source/SourceFetchingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/source/SourceFetchingIT.java index 616fc2e1f3483..0e7f8b604a8df 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/source/SourceFetchingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/source/SourceFetchingIT.java @@ -12,11 +12,13 @@ import org.elasticsearch.test.ESIntegTestCase; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsEqual.equalTo; public class SourceFetchingIT extends ESIntegTestCase { + public void testSourceDefaultBehavior() { createIndex("test"); ensureGreen(); @@ -24,18 +26,16 @@ public void testSourceDefaultBehavior() { indexDoc("test", "1", "field", "value"); refresh(); - assertResponse(prepareSearch("test"), response -> assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue())); + assertResponses( + response -> assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()), + prepareSearch("test"), + prepareSearch("test").addStoredField("_source") + ); assertResponse( prepareSearch("test").addStoredField("bla"), response -> assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()) ); - - assertResponse( - prepareSearch("test").addStoredField("_source"), - response -> assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()) - ); - } public void testSourceFiltering() { @@ -55,20 +55,20 @@ public void testSourceFiltering() { response -> assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()) ); - assertResponse(prepareSearch("test").setFetchSource("field1", null), response -> { + assertResponses(response -> { assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value")); - }); + }, + prepareSearch("test").setFetchSource("field1", null), + prepareSearch("test").setFetchSource(new String[] { "*" }, new String[] { "field2" }) + ); + assertResponse(prepareSearch("test").setFetchSource("hello", null), response -> { assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(0)); }); - assertResponse(prepareSearch("test").setFetchSource(new String[] { "*" }, new String[] { "field2" }), response -> { - assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); - assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value")); - }); + } /** @@ -82,15 +82,13 @@ public void testSourceWithWildcardFiltering() { prepareIndex("test").setId("1").setSource("field", "value").get(); refresh(); - 
assertResponse(prepareSearch("test").setFetchSource(new String[] { "*.notexisting", "field" }, null), response -> { - assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); - assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value")); - }); - assertResponse(prepareSearch("test").setFetchSource(new String[] { "field.notexisting.*", "field" }, null), response -> { + assertResponses(response -> { assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value")); - }); + }, + prepareSearch("test").setFetchSource(new String[] { "*.notexisting", "field" }, null), + prepareSearch("test").setFetchSource(new String[] { "field.notexisting.*", "field" }, null) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index 0ceef5d3c70f1..8b21bb54361b6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -61,6 +61,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasScore; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @@ -159,21 +160,13 @@ public void testTextAndGlobalText() throws Exception { } indexRandom(true, indexRequestBuilders); CompletionSuggestionBuilder noText = SuggestBuilders.completionSuggestion(FIELD); - assertResponse( - prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", noText).setGlobalText("sugg")), - response -> assertSuggestions(response, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6") - ); - CompletionSuggestionBuilder withText = SuggestBuilders.completionSuggestion(FIELD).text("sugg"); - assertResponse( + assertResponses( + response -> assertSuggestions(response, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6"), + prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", noText).setGlobalText("sugg")), prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", withText)), - response -> assertSuggestions(response, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6") - ); - - // test that suggestion text takes precedence over global text - assertResponse( - prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", withText).setGlobalText("bogus")), - response -> assertSuggestions(response, "foo", "suggestion10", "suggestion9", "suggestion8", "suggestion7", "suggestion6") + // test that suggestion text takes precedence over global text + prepareSearch(INDEX).suggest(new SuggestBuilder().addSuggestion("foo", 
withText).setGlobalText("bogus")) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java index 980ef2a87c9c2..e5e641bfdda21 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java @@ -109,6 +109,7 @@ public void testRestartNodeDuringSnapshot() throws Exception { final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); final var snapshotFuture = startFullSnapshotBlockedOnDataNode(randomIdentifier(), repoName, originalNode); + safeAwait((ActionListener<Void> l) -> flushMasterQueue(clusterService, l)); final var snapshotCompletesWithoutPausingListener = ClusterServiceUtils.addTemporaryStateListener(clusterService, state -> { final var entriesForRepo = SnapshotsInProgress.get(state).forRepo(repoName); if (entriesForRepo.isEmpty()) { diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 3b3b06a1c6924..d572d3b90fec8 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -7,6 +7,7 @@ * License v3.0 only", or the "Server Side Public License, v 1". */ +import org.elasticsearch.internal.CompletionsPostingsFormatExtension; import org.elasticsearch.plugins.internal.RestExtension; /** The Elasticsearch Server Module. */ @@ -288,7 +289,8 @@ to org.elasticsearch.serverless.version, org.elasticsearch.serverless.buildinfo, - org.elasticsearch.serverless.constants; + org.elasticsearch.serverless.constants, + org.elasticsearch.serverless.codec; exports org.elasticsearch.lucene.analysis.miscellaneous; exports org.elasticsearch.lucene.grouping; exports org.elasticsearch.lucene.queries; @@ -395,6 +397,7 @@ org.elasticsearch.stateless, org.elasticsearch.settings.secure, org.elasticsearch.serverless.constants, + org.elasticsearch.serverless.codec, org.elasticsearch.serverless.apifiltering, org.elasticsearch.internal.security; @@ -414,18 +417,18 @@ uses org.elasticsearch.node.internal.TerminationHandlerProvider; uses org.elasticsearch.internal.VersionExtension; uses org.elasticsearch.internal.BuildExtension; + uses CompletionsPostingsFormatExtension; uses org.elasticsearch.features.FeatureSpecification; uses org.elasticsearch.plugins.internal.LoggingDataProvider; provides org.elasticsearch.features.FeatureSpecification with + org.elasticsearch.action.admin.indices.stats.IndicesStatsFeatures, org.elasticsearch.action.bulk.BulkFeatures, org.elasticsearch.features.FeatureInfrastructureFeatures, org.elasticsearch.health.HealthFeatures, - org.elasticsearch.cluster.service.TransportFeatures, org.elasticsearch.cluster.metadata.MetadataFeatures, org.elasticsearch.rest.RestFeatures, - org.elasticsearch.indices.IndicesFeatures, org.elasticsearch.repositories.RepositoriesFeatures, org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures, org.elasticsearch.rest.action.admin.cluster.ClusterRerouteFeatures, @@ -434,7 +437,8 @@ org.elasticsearch.search.SearchFeatures, org.elasticsearch.script.ScriptFeatures, org.elasticsearch.search.retriever.RetrieversFeatures, - org.elasticsearch.reservedstate.service.FileSettingsFeatures; + org.elasticsearch.reservedstate.service.FileSettingsFeatures, + org.elasticsearch.action.admin.cluster.stats.ClusterStatsFeatures; uses org.elasticsearch.plugins.internal.SettingsExtension; uses
RestExtension; diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 32198ba7584be..3c5c365654206 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -25,7 +25,9 @@ import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.core.Tuple; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.health.node.action.HealthNodeNotDiscoveredException; import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.DocumentParsingException; @@ -611,23 +613,31 @@ protected static void generateThrowableXContent(XContentBuilder builder, Params */ public static XContentBuilder generateFailureXContent(XContentBuilder builder, Params params, @Nullable Exception e, boolean detailed) throws IOException { - // No exception to render as an error + if (builder.getRestApiVersion() == RestApiVersion.V_8) { + if (e == null) { + return builder.field(ERROR, "unknown"); + } + if (detailed == false) { + return generateNonDetailedFailureXContentV8(builder, e); + } + // else fallthrough + } + if (e == null) { - return builder.field(ERROR, "unknown"); + // No exception to render as an error + builder.startObject(ERROR); + builder.field(TYPE, "unknown"); + builder.field(REASON, "unknown"); + return builder.endObject(); } - // Render the exception with a simple message if (detailed == false) { - String message = "No ElasticsearchException found"; - Throwable t = e; - for (int counter = 0; counter < 10 && t != null; counter++) { - if (t instanceof ElasticsearchException) { - message = t.getClass().getSimpleName() + "[" + t.getMessage() + "]"; - break; - } - t = t.getCause(); - } - return builder.field(ERROR, message); + // just render the type & message + Throwable t = ExceptionsHelper.unwrapCause(e); + builder.startObject(ERROR); + builder.field(TYPE, getExceptionName(t)); + builder.field(REASON, t.getMessage()); + return builder.endObject(); } // Render the exception with all details @@ -646,6 +656,20 @@ public static XContentBuilder generateFailureXContent(XContentBuilder builder, P return builder.endObject(); } + @UpdateForV10(owner = UpdateForV10.Owner.CORE_INFRA) // remove V8 API + private static XContentBuilder generateNonDetailedFailureXContentV8(XContentBuilder builder, @Nullable Exception e) throws IOException { + String message = "No ElasticsearchException found"; + Throwable t = e; + for (int counter = 0; counter < 10 && t != null; counter++) { + if (t instanceof ElasticsearchException) { + message = t.getClass().getSimpleName() + "[" + t.getMessage() + "]"; + break; + } + t = t.getCause(); + } + return builder.field(ERROR, message); + } + /** * Parses the output of {@link #generateFailureXContent(XContentBuilder, Params, Exception, boolean)} */ @@ -729,8 +753,8 @@ public static String getExceptionName(Throwable ex) { static String buildMessage(String type, String reason, String stack) { StringBuilder message = new StringBuilder("Elasticsearch exception ["); - message.append(TYPE).append('=').append(type).append(", "); - message.append(REASON).append('=').append(reason); + message.append(TYPE).append('=').append(type); + message.append(", ").append(REASON).append('=').append(reason); if (stack != null) { 
message.append(", ").append(STACK_TRACE).append('=').append(stack); } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index c9931a6714fc1..996ecfb3ca521 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -104,6 +104,7 @@ static TransportVersion def(int id) { public static final TransportVersion V_8_14_0 = def(8_636_00_1); public static final TransportVersion V_8_15_0 = def(8_702_00_2); public static final TransportVersion V_8_15_2 = def(8_702_00_3); + public static final TransportVersion QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_15 = def(8_702_00_4); public static final TransportVersion ML_INFERENCE_DONT_DELETE_WHEN_SEMANTIC_TEXT_EXISTS = def(8_703_00_0); public static final TransportVersion INFERENCE_ADAPTIVE_ALLOCATIONS = def(8_704_00_0); public static final TransportVersion INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN = def(8_705_00_0); @@ -177,6 +178,7 @@ static TransportVersion def(int id) { public static final TransportVersion INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16 = def(8_772_00_1); public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO_BACKPORT_8_16 = def(8_772_00_2); public static final TransportVersion SKIP_INNER_HITS_SEARCH_SOURCE_BACKPORT_8_16 = def(8_772_00_3); + public static final TransportVersion QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16 = def(8_772_00_4); public static final TransportVersion REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_773_00_0); public static final TransportVersion REVERT_REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_774_00_0); public static final TransportVersion ESQL_FIELD_ATTRIBUTE_PARENT_SIMPLIFIED = def(8_775_00_0); @@ -197,7 +199,20 @@ static TransportVersion def(int id) { public static final TransportVersion VERTEX_AI_INPUT_TYPE_ADDED = def(8_790_00_0); public static final TransportVersion SKIP_INNER_HITS_SEARCH_SOURCE = def(8_791_00_0); public static final TransportVersion QUERY_RULES_LIST_INCLUDES_TYPES = def(8_792_00_0); - public static final TransportVersion KNN_QUERY_RESCORE_OVERSAMPLE = def(8_793_00_0); + public static final TransportVersion INDEX_STATS_ADDITIONAL_FIELDS = def(8_793_00_0); + public static final TransportVersion INDEX_STATS_ADDITIONAL_FIELDS_REVERT = def(8_794_00_0); + public static final TransportVersion FAST_REFRESH_RCO_2 = def(8_795_00_0); + public static final TransportVersion ESQL_ENRICH_RUNTIME_WARNINGS = def(8_796_00_0); + public static final TransportVersion INGEST_PIPELINE_CONFIGURATION_AS_MAP = def(8_797_00_0); + public static final TransportVersion LOGSDB_TELEMETRY_CUSTOM_CUTOFF_DATE_FIX_8_17 = def(8_797_00_1); + public static final TransportVersion SOURCE_MODE_TELEMETRY_FIX_8_17 = def(8_797_00_2); + public static final TransportVersion INDEXING_PRESSURE_THROTTLING_STATS = def(8_798_00_0); + public static final TransportVersion REINDEX_DATA_STREAMS = def(8_799_00_0); + public static final TransportVersion ESQL_REMOVE_NODE_LEVEL_PLAN = def(8_800_00_0); + public static final TransportVersion LOGSDB_TELEMETRY_CUSTOM_CUTOFF_DATE = def(8_801_00_0); + public static final TransportVersion SOURCE_MODE_TELEMETRY = def(8_802_00_0); + public static final TransportVersion NEW_REFRESH_CLUSTER_BLOCK = def(8_803_00_0); + public static final TransportVersion KNN_QUERY_RESCORE_OVERSAMPLE = def(8_804_00_0); /* * STOP! READ THIS FIRST! 
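For orientation on the block of constants above: the underscore grouping suggests each id encodes major, incremental, subsidiary and patch components (M_NNN_SS_P), which is why ids such as 8_797_00_1 and 8_797_00_2 can be slotted in after the fact as 8.17 backport patches without renumbering the mainline. This reading is inferred from the grouping alone; the authoritative description is the comment block this hunk ends in. An illustrative decomposition under that assumed layout:

final class TransportVersionIdParts {
    static int major(int id)       { return id / 1_000_000; }
    static int incremental(int id) { return (id / 1_000) % 1_000; }
    static int subsidiary(int id)  { return (id / 10) % 100; }
    static int patch(int id)       { return id % 10; }

    public static void main(String[] args) {
        int id = 8_797_00_2; // Java digit separators are purely visual: the value is 8797002
        System.out.printf("major=%d incremental=%d subsidiary=%d patch=%d%n",
                major(id), incremental(id), subsidiary(id), patch(id)); // prints 8, 797, 0, 2
    }
}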
No, really, diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 7791ca200a785..24aa5bd261d7e 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -187,9 +187,12 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_15_2 = new Version(8_15_02_99); public static final Version V_8_15_3 = new Version(8_15_03_99); public static final Version V_8_15_4 = new Version(8_15_04_99); + public static final Version V_8_15_6 = new Version(8_15_06_99); public static final Version V_8_16_0 = new Version(8_16_00_99); public static final Version V_8_16_1 = new Version(8_16_01_99); + public static final Version V_8_16_2 = new Version(8_16_02_99); public static final Version V_8_17_0 = new Version(8_17_00_99); + public static final Version V_8_18_0 = new Version(8_18_00_99); public static final Version V_9_0_0 = new Version(9_00_00_99); public static final Version CURRENT = V_9_0_0; diff --git a/server/src/main/java/org/elasticsearch/action/ActionListener.java b/server/src/main/java/org/elasticsearch/action/ActionListener.java index 890c3251e4f9a..a158669d936fe 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/ActionListener.java @@ -475,4 +475,12 @@ static void runWithResource( ActionListener.run(ActionListener.runBefore(listener, resource::close), l -> action.accept(l, resource)); } + /** + * Increments ref count and returns a listener that will decrement ref count on listener completion. + */ + static ActionListener withRef(ActionListener listener, RefCounted ref) { + ref.mustIncRef(); + return releaseAfter(listener, ref::decRef); + } + } diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 9f727f49530a1..98d6284fd91d2 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -853,7 +853,7 @@ public void initRestHandlers(Supplier nodesInCluster, Predicate< registerHandler.accept(new RestClusterStateAction(settingsFilter, threadPool)); registerHandler.accept(new RestClusterHealthAction()); registerHandler.accept(new RestClusterUpdateSettingsAction()); - registerHandler.accept(new RestClusterGetSettingsAction(settings, clusterSettings, settingsFilter, clusterSupportsFeature)); + registerHandler.accept(new RestClusterGetSettingsAction(settings, clusterSettings, settingsFilter)); registerHandler.accept(new RestClusterRerouteAction(settingsFilter)); registerHandler.accept(new RestClusterSearchShardsAction()); registerHandler.accept(new RestPendingClusterTasksAction()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java index bed2815f5a895..a0948af88e2f5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java @@ -20,7 +20,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import 
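ActionListener.withRef above is a small composition of existing pieces: mustIncRef() before returning, then releaseAfter(...) to guarantee the matching decRef() runs exactly once when the listener completes on either path. A stand-alone sketch of the same idea with simplified types (the real RefCounted and ActionListener interfaces carry more machinery):

import java.util.concurrent.atomic.AtomicInteger;

final class RefScoped {
    interface Listener<T> { void onResponse(T result); void onFailure(Exception e); }

    static <T> Listener<T> withRef(Listener<T> delegate, AtomicInteger refCount) {
        refCount.incrementAndGet(); // mustIncRef(): the resource now outlives the async call
        return new Listener<T>() {
            @Override public void onResponse(T result) {
                try { delegate.onResponse(result); } finally { refCount.decrementAndGet(); }
            }
            @Override public void onFailure(Exception e) {
                try { delegate.onFailure(e); } finally { refCount.decrementAndGet(); }
            }
        };
    }
}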
org.elasticsearch.cluster.desirednodes.VersionConflictException; -import org.elasticsearch.cluster.metadata.DesiredNode; import org.elasticsearch.cluster.metadata.DesiredNodes; import org.elasticsearch.cluster.metadata.DesiredNodesMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -99,22 +98,6 @@ protected void masterOperation( ); } - @Override - protected void doExecute(Task task, UpdateDesiredNodesRequest request, ActionListener listener) { - if (request.clusterHasRequiredFeatures(nf -> featureService.clusterHasFeature(clusterService.state(), nf)) == false) { - listener.onFailure( - new IllegalArgumentException( - "Unable to use processor ranges, floating-point (with greater precision) processors " - + "in mixed-clusters with nodes that do not support feature " - + DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORTED.id() - ) - ); - return; - } - - super.doExecute(task, request, listener); - } - static ClusterState replaceDesiredNodes(ClusterState clusterState, DesiredNodes newDesiredNodes) { return clusterState.copyAndUpdateMetadata( metadata -> metadata.putCustom(DesiredNodesMetadata.TYPE, new DesiredNodesMetadata(newDesiredNodes)) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java index a94401fdd66f3..21b714b105b59 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java @@ -18,7 +18,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; @@ -26,7 +25,6 @@ import java.io.IOException; import java.util.List; import java.util.Objects; -import java.util.function.Predicate; public class UpdateDesiredNodesRequest extends AcknowledgedRequest { private static final TransportVersion DRY_RUN_VERSION = TransportVersions.V_8_4_0; @@ -117,11 +115,6 @@ public boolean isDryRun() { return dryRun; } - public boolean clusterHasRequiredFeatures(Predicate clusterHasFeature) { - return clusterHasFeature.test(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORTED) - || nodes.stream().allMatch(n -> n.clusterHasRequiredFeatures(clusterHasFeature)); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java index e2475bca31d53..afe615add28df 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java @@ -19,7 +19,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import 
org.elasticsearch.index.IndexVersions; import org.elasticsearch.indices.SystemIndices; @@ -56,15 +55,13 @@ public class TransportGetFeatureUpgradeStatusAction extends TransportMasterNodeA /** * Once all feature migrations for 8.x -> 9.x have been tested, we can bump this to Version.V_8_0_0 */ - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) - public static final Version NO_UPGRADE_REQUIRED_VERSION = Version.V_7_0_0; - public static final IndexVersion NO_UPGRADE_REQUIRED_INDEX_VERSION = IndexVersions.V_7_0_0; + public static final Version NO_UPGRADE_REQUIRED_VERSION = Version.V_8_0_0; + public static final IndexVersion NO_UPGRADE_REQUIRED_INDEX_VERSION = IndexVersions.V_8_0_0; private final SystemIndices systemIndices; PersistentTasksService persistentTasksService; @Inject - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // Once we begin working on 9.x, we need to update our migration classes public TransportGetFeatureUpgradeStatusAction( TransportService transportService, ThreadPool threadPool, @@ -149,7 +146,6 @@ static GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus getFeatureUpgradeSta .map(idxInfo -> ERROR) .map(idxStatus -> GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(idxStatus, initialStatus)) .orElse(initialStatus); - return new GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus(featureName, minimumVersion, status, indexInfos); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/TransportNodesFeaturesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/TransportNodesFeaturesAction.java index 83d1356e5ef62..d20eee96809e8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/TransportNodesFeaturesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/features/TransportNodesFeaturesAction.java @@ -16,7 +16,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.features.FeatureService; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; @@ -27,8 +27,7 @@ import java.io.IOException; import java.util.List; -@UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) -// @UpdateForV10 // this can be removed in v10. It may be called by v8 nodes to v9 nodes. +@UpdateForV10(owner = UpdateForV10.Owner.CORE_INFRA) // this can be removed in v10. It may be called by v8 nodes to v9 nodes. public class TransportNodesFeaturesAction extends TransportNodesAction< NodesFeaturesRequest, NodesFeaturesResponse, diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesFeatures.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsFeatures.java similarity index 67% rename from server/src/main/java/org/elasticsearch/indices/IndicesFeatures.java rename to server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsFeatures.java index bd39d125969ce..6e85093a52cdd 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesFeatures.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsFeatures.java @@ -7,17 +7,20 @@ * License v3.0 only", or the "Server Side Public License, v 1". 
*/ -package org.elasticsearch.indices; +package org.elasticsearch.action.admin.cluster.stats; -import org.elasticsearch.Version; import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; -import java.util.Map; +import java.util.Set; + +/** + * Spec for cluster stats features. + */ +public class ClusterStatsFeatures implements FeatureSpecification { -public class IndicesFeatures implements FeatureSpecification { @Override - public Map getHistoricalFeatures() { - return Map.of(IndicesService.SUPPORTS_AUTO_PUT, Version.V_8_8_0); + public Set getFeatures() { + return Set.of(MappingStats.SOURCE_MODES_FEATURE); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java index c1f867c247345..5c4be62723e07 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java @@ -793,6 +793,8 @@ static class IndexPressureStats implements ToXContentFragment { long currentCoordinatingOps = 0; long currentPrimaryOps = 0; long currentReplicaOps = 0; + long lowWaterMarkSplits = 0; + long highWaterMarkSplits = 0; for (NodeStats nodeStat : nodeStats) { IndexingPressureStats nodeStatIndexingPressureStats = nodeStat.getIndexingPressureStats(); if (nodeStatIndexingPressureStats != null) { @@ -816,6 +818,8 @@ static class IndexPressureStats implements ToXContentFragment { currentReplicaOps += nodeStatIndexingPressureStats.getCurrentReplicaOps(); primaryDocumentRejections += nodeStatIndexingPressureStats.getPrimaryDocumentRejections(); totalCoordinatingRequests += nodeStatIndexingPressureStats.getTotalCoordinatingRequests(); + lowWaterMarkSplits += nodeStatIndexingPressureStats.getLowWaterMarkSplits(); + highWaterMarkSplits += nodeStatIndexingPressureStats.getHighWaterMarkSplits(); } } indexingPressureStats = new IndexingPressureStats( @@ -838,7 +842,9 @@ static class IndexPressureStats implements ToXContentFragment { currentPrimaryOps, currentReplicaOps, primaryDocumentRejections, - totalCoordinatingRequests + totalCoordinatingRequests, + lowWaterMarkSplits, + highWaterMarkSplits ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequest.java index d8db2c5e657b4..ce9b48666d6ed 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequest.java @@ -48,9 +48,10 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, return new CancellableTask(id, type, action, "", parentTaskId, headers); } - public ClusterStatsRequest asRemoteStats() { - this.remoteStats = true; - return this; + public static ClusterStatsRequest newRemoteClusterStatsRequest() { + final var request = new ClusterStatsRequest(); + request.remoteStats = true; + return request; } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/LongMetric.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/LongMetric.java index 737e83d4b30a1..07d9c11ae4c07 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/LongMetric.java +++ 
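The renamed ClusterStatsFeatures here (and IndicesStatsFeatures later in this patch) both reduce to the same shape: a FeatureSpecification that advertises a set of NodeFeature ids, registered through the `provides` clauses added to module-info.java earlier. A minimal stand-alone sketch with simplified stand-in types:

import java.util.Set;

record NodeFeature(String id) {}

interface FeatureSpecification {
    Set<NodeFeature> getFeatures();
}

final class ExampleStatsFeatures implements FeatureSpecification {
    static final NodeFeature SOURCE_MODES = new NodeFeature("cluster.stats.source_modes");

    @Override
    public Set<NodeFeature> getFeatures() {
        // each node advertises its features; cluster-wide support is their intersection
        return Set.of(SOURCE_MODES);
    }
}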
b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/LongMetric.java @@ -74,6 +74,7 @@ public static LongMetricValue fromStream(StreamInput in) throws IOException { try { // TODO: not sure what is the good value for minBarForHighestToLowestValueRatio here? Histogram dh = Histogram.decodeFromCompressedByteBuffer(bb, 1); + dh.setAutoResize(true); return new LongMetricValue(dh); } catch (DataFormatException e) { throw new IOException(e); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java index d2e5973169919..1bc2e1d13c864 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.cluster.stats; +import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; @@ -19,6 +20,8 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.Nullable; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; @@ -31,6 +34,7 @@ import java.util.HashSet; import java.util.IdentityHashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.OptionalLong; @@ -44,6 +48,8 @@ */ public final class MappingStats implements ToXContentFragment, Writeable { + static final NodeFeature SOURCE_MODES_FEATURE = new NodeFeature("cluster.stats.source_modes"); + private static final Pattern DOC_PATTERN = Pattern.compile("doc[\\[.]"); private static final Pattern SOURCE_PATTERN = Pattern.compile("params\\._source"); @@ -53,6 +59,8 @@ public final class MappingStats implements ToXContentFragment, Writeable { public static MappingStats of(Metadata metadata, Runnable ensureNotCancelled) { Map fieldTypes = new HashMap<>(); Set concreteFieldNames = new HashSet<>(); + // Account different source modes based on index.mapping.source.mode setting: + Map sourceModeUsageCount = new HashMap<>(); Map runtimeFieldTypes = new HashMap<>(); final Map mappingCounts = new IdentityHashMap<>(metadata.getMappingsByHash().size()); for (IndexMetadata indexMetadata : metadata) { @@ -62,6 +70,9 @@ public static MappingStats of(Metadata metadata, Runnable ensureNotCancelled) { continue; } AnalysisStats.countMapping(mappingCounts, indexMetadata); + + var sourceMode = SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.get(indexMetadata.getSettings()); + sourceModeUsageCount.merge(sourceMode.toString().toLowerCase(Locale.ENGLISH), 1, Integer::sum); } final AtomicLong totalFieldCount = new AtomicLong(); final AtomicLong totalDeduplicatedFieldCount = new AtomicLong(); @@ -175,12 +186,14 @@ public static MappingStats of(Metadata metadata, Runnable ensureNotCancelled) { for (MappingMetadata mappingMetadata : metadata.getMappingsByHash().values()) { totalMappingSizeBytes += mappingMetadata.source().compressed().length; } + return new MappingStats( totalFieldCount.get(), totalDeduplicatedFieldCount.get(), totalMappingSizeBytes, fieldTypes.values(), - runtimeFieldTypes.values() + runtimeFieldTypes.values(), + 
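The one-line dh.setAutoResize(true) fix in LongMetric above matters because Histogram.decodeFromCompressedByteBuffer pins the decoded histogram's trackable range, so merging in larger values later can fail. A runnable round trip against the HdrHistogram library (assumed to be on the classpath, as it is for this class) showing the decode-then-resize step:

import java.nio.ByteBuffer;
import java.util.zip.DataFormatException;
import org.HdrHistogram.Histogram;

final class HistogramRoundTrip {
    public static void main(String[] args) throws DataFormatException {
        Histogram original = new Histogram(3); // auto-resizing, 3 significant digits
        original.recordValue(42);
        original.recordValue(9_000);

        ByteBuffer buf = ByteBuffer.allocate(4096);
        original.encodeIntoCompressedByteBuffer(buf);
        buf.flip();

        Histogram decoded = Histogram.decodeFromCompressedByteBuffer(buf, 1);
        decoded.setAutoResize(true);    // without this, recording larger values may throw
        decoded.recordValue(1_000_000); // safe once auto-resize is re-enabled
        System.out.println(decoded.getMaxValue());
    }
}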
sourceModeUsageCount ); } @@ -215,17 +228,20 @@ private static int countOccurrences(String script, Pattern pattern) { private final List fieldTypeStats; private final List runtimeFieldStats; + private final Map sourceModeUsageCount; MappingStats( long totalFieldCount, long totalDeduplicatedFieldCount, long totalMappingSizeBytes, Collection fieldTypeStats, - Collection runtimeFieldStats + Collection runtimeFieldStats, + Map sourceModeUsageCount ) { this.totalFieldCount = totalFieldCount; this.totalDeduplicatedFieldCount = totalDeduplicatedFieldCount; this.totalMappingSizeBytes = totalMappingSizeBytes; + this.sourceModeUsageCount = sourceModeUsageCount; List stats = new ArrayList<>(fieldTypeStats); stats.sort(Comparator.comparing(IndexFeatureStats::getName)); this.fieldTypeStats = Collections.unmodifiableList(stats); @@ -246,6 +262,10 @@ private static int countOccurrences(String script, Pattern pattern) { } fieldTypeStats = in.readCollectionAsImmutableList(FieldStats::new); runtimeFieldStats = in.readCollectionAsImmutableList(RuntimeFieldStats::new); + var transportVersion = in.getTransportVersion(); + sourceModeUsageCount = canReadOrWriteSourceModeTelemetry(transportVersion) + ? in.readImmutableMap(StreamInput::readString, StreamInput::readVInt) + : Map.of(); } @Override @@ -257,6 +277,15 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeCollection(fieldTypeStats); out.writeCollection(runtimeFieldStats); + var transportVersion = out.getTransportVersion(); + if (canReadOrWriteSourceModeTelemetry(transportVersion)) { + out.writeMap(sourceModeUsageCount, StreamOutput::writeVInt); + } + } + + private static boolean canReadOrWriteSourceModeTelemetry(TransportVersion version) { + return version.isPatchFrom(TransportVersions.SOURCE_MODE_TELEMETRY_FIX_8_17) + || version.onOrAfter(TransportVersions.SOURCE_MODE_TELEMETRY); } private static OptionalLong ofNullable(Long l) { @@ -300,6 +329,10 @@ public List getRuntimeFieldStats() { return runtimeFieldStats; } + public Map getSourceModeUsageCount() { + return sourceModeUsageCount; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject("mappings"); @@ -326,6 +359,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws st.toXContent(builder, params); } builder.endArray(); + builder.startObject("source_modes"); + var entries = sourceModeUsageCount.entrySet().stream().sorted(Map.Entry.comparingByKey()).toList(); + for (var entry : entries) { + builder.field(entry.getKey(), entry.getValue()); + } + builder.endObject(); builder.endObject(); return builder; } @@ -344,11 +383,19 @@ public boolean equals(Object o) { && Objects.equals(totalDeduplicatedFieldCount, that.totalDeduplicatedFieldCount) && Objects.equals(totalMappingSizeBytes, that.totalMappingSizeBytes) && fieldTypeStats.equals(that.fieldTypeStats) - && runtimeFieldStats.equals(that.runtimeFieldStats); + && runtimeFieldStats.equals(that.runtimeFieldStats) + && sourceModeUsageCount.equals(that.sourceModeUsageCount); } @Override public int hashCode() { - return Objects.hash(totalFieldCount, totalDeduplicatedFieldCount, totalMappingSizeBytes, fieldTypeStats, runtimeFieldStats); + return Objects.hash( + totalFieldCount, + totalDeduplicatedFieldCount, + totalMappingSizeBytes, + fieldTypeStats, + runtimeFieldStats, + sourceModeUsageCount + ); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java 
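canReadOrWriteSourceModeTelemetry above is the usual two-sided wire gate: the same predicate guards both readMap and writeMap, and it must accept the 8.17 backport patch id as well as everything on or after the mainline id. A simplified stand-in using plain ints under the assumed M_NNN_SS_P layout (not the real TransportVersion implementation):

final class WireGate {
    static final int SOURCE_MODE_TELEMETRY = 8_802_00_0;          // first mainline id with the field
    static final int SOURCE_MODE_TELEMETRY_FIX_8_17 = 8_797_00_2; // backport patch id

    static boolean canReadOrWriteSourceModes(int wireVersion) {
        return isPatchFrom(wireVersion, SOURCE_MODE_TELEMETRY_FIX_8_17)
            || wireVersion >= SOURCE_MODE_TELEMETRY;              // onOrAfter on the mainline
    }

    // same M_NNN_SS base and an equal-or-later patch digit (illustrative only)
    private static boolean isPatchFrom(int id, int base) {
        return id / 10 == base / 10 && id % 10 >= base % 10;
    }
}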
b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 36b018b5002eb..97585ea9a1024 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.nodes.TransportNodesAction; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterSnapshotStats; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -108,20 +109,19 @@ public class TransportClusterStatsAction extends TransportNodesAction< private final MetadataStatsCache mappingStatsCache; private final MetadataStatsCache analysisStatsCache; private final RemoteClusterService remoteClusterService; - private final TransportRemoteClusterStatsAction remoteClusterStatsAction; @Inject public TransportClusterStatsAction( ThreadPool threadPool, ClusterService clusterService, TransportService transportService, + Client client, NodeService nodeService, IndicesService indicesService, RepositoriesService repositoriesService, UsageService usageService, ActionFilters actionFilters, - Settings settings, - TransportRemoteClusterStatsAction remoteClusterStatsAction + Settings settings ) { super( TYPE.name(), @@ -141,7 +141,9 @@ public TransportClusterStatsAction( this.analysisStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), AnalysisStats::of); this.remoteClusterService = transportService.getRemoteClusterService(); this.settings = settings; - this.remoteClusterStatsAction = remoteClusterStatsAction; + + // register remote-cluster action with transport service only and not as a local-node Action that the Client can invoke + new TransportRemoteClusterStatsAction(client, transportService, actionFilters); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportRemoteClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportRemoteClusterStatsAction.java index 4d57f10807af6..882aaa8b18e15 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportRemoteClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportRemoteClusterStatsAction.java @@ -10,11 +10,11 @@ package org.elasticsearch.action.admin.cluster.stats; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.RemoteClusterActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; @@ -27,26 +27,26 @@ public class TransportRemoteClusterStatsAction extends HandledTransportAction { public static final String NAME = "cluster:monitor/stats/remote"; - public static final ActionType TYPE = new ActionType<>(NAME); public static final RemoteClusterActionType REMOTE_TYPE = new RemoteClusterActionType<>( NAME, 
RemoteClusterStatsResponse::new ); - private final NodeClient client; + + private final Client client; + private final TransportService transportService; @Inject - public TransportRemoteClusterStatsAction(NodeClient client, TransportService transportService, ActionFilters actionFilters) { + public TransportRemoteClusterStatsAction(Client client, TransportService transportService, ActionFilters actionFilters) { super(NAME, transportService, actionFilters, RemoteClusterStatsRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.client = client; + this.transportService = transportService; } @Override protected void doExecute(Task task, RemoteClusterStatsRequest request, ActionListener listener) { - ClusterStatsRequest subRequest = new ClusterStatsRequest().asRemoteStats(); - subRequest.setParentTask(request.getParentTask()); - client.execute( + new ParentTaskAssigningClient(client, transportService.getLocalNode(), task).execute( TransportClusterStatsAction.TYPE, - subRequest, + ClusterStatsRequest.newRemoteClusterStatsRequest(), listener.map( clusterStatsResponse -> new RemoteClusterStatsResponse( clusterStatsResponse.getClusterUUID(), diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java index 35e46d3f2a4da..c3bdfc5a594c0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java @@ -11,11 +11,14 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.script.StoredScriptSource; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; @@ -28,11 +31,27 @@ public class PutStoredScriptRequest extends AcknowledgedRequest implements ToXContentFragment { - private String id; - private String context; - private BytesReference content; - private XContentType xContentType; - private StoredScriptSource source; + @Nullable + private final String id; + + @Nullable + private final String context; + + /* + * [NOTE: unused fields #117566] + * As of #117566 (8.18) the content and xContentType fields are basically unused, except that we use content().length() for some + * validation. However, in earlier 8.x versions they did at least influence the output of toString(). That means in 9.x we can replace + * these fields with an int representing the original content length once the 9.x transport protocol can diverge from the 8.x one. For + * BwC with 8.18 we can simply send any BytesReference of the appropriate length. 
+ */ + + @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // see [NOTE: unused fields #117566] + private final BytesReference content; + + @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // see [NOTE: unused fields #117566] + private final XContentType xContentType; + + private final StoredScriptSource source; public PutStoredScriptRequest(StreamInput in) throws IOException { super(in); @@ -43,15 +62,11 @@ public PutStoredScriptRequest(StreamInput in) throws IOException { source = new StoredScriptSource(in); } - public PutStoredScriptRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout) { - super(masterNodeTimeout, ackTimeout); - } - public PutStoredScriptRequest( TimeValue masterNodeTimeout, TimeValue ackTimeout, - String id, - String context, + @Nullable String id, + @Nullable String context, BytesReference content, XContentType xContentType, StoredScriptSource source @@ -59,9 +74,9 @@ public PutStoredScriptRequest( super(masterNodeTimeout, ackTimeout); this.id = id; this.context = context; - this.content = content; + this.content = Objects.requireNonNull(content); this.xContentType = Objects.requireNonNull(xContentType); - this.source = source; + this.source = Objects.requireNonNull(source); } @Override @@ -74,10 +89,6 @@ public ActionRequestValidationException validate() { validationException = addValidationError("id cannot contain '#' for stored script", validationException); } - if (content == null) { - validationException = addValidationError("must specify code for stored script", validationException); - } - return validationException; } @@ -85,20 +96,10 @@ public String id() { return id; } - public PutStoredScriptRequest id(String id) { - this.id = id; - return this; - } - public String context() { return context; } - public PutStoredScriptRequest context(String context) { - this.context = context; - return this; - } - public BytesReference content() { return content; } @@ -111,16 +112,6 @@ public StoredScriptSource source() { return source; } - /** - * Set the script source and the content type of the bytes. - */ - public PutStoredScriptRequest content(BytesReference content, XContentType xContentType) { - this.content = content; - this.xContentType = Objects.requireNonNull(xContentType); - this.source = StoredScriptSource.parse(content, xContentType); - return this; - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -133,28 +124,16 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - String source = "_na_"; - - try { - source = XContentHelper.convertToJson(content, false, xContentType); - } catch (Exception e) { - // ignore - } - - return "put stored script {id [" - + id - + "]" - + (context != null ? ", context [" + context + "]" : "") - + ", content [" - + source - + "]}"; + return Strings.format( + "put stored script {id [%s]%s, content [%s]}", + id, + context != null ? 
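The constructor rewrite above (final fields, Objects.requireNonNull, setters deleted) turns PutStoredScriptRequest into a request that is fully formed at construction, which is also why the null check could be dropped from validate(). The same pattern in a generic, stand-alone form with stand-in types:

import java.util.Objects;

final class PutScriptRequestSketch {
    private final String id;      // may be null: optional in this sketch, as in the request above
    private final String context; // may be null
    private final byte[] content; // never null once constructed
    private final String source;  // never null once constructed

    PutScriptRequestSketch(String id, String context, byte[] content, String source) {
        this.id = id;
        this.context = context;
        this.content = Objects.requireNonNull(content, "content");
        this.source = Objects.requireNonNull(source, "source");
    }

    String id() { return id; }
    String context() { return context; }
    String source() { return source; }
    // validation can now assume non-null content and check derived properties only
    boolean hasContent() { return content.length > 0; }
}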
", context [" + context + "]" : "", + source + ); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field("script"); - source.toXContent(builder, params); - - return builder; + return builder.field("script", source, params); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index 7857e9a22e9b9..cb667400240f0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -23,7 +23,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.injection.guice.Inject; @@ -120,27 +119,18 @@ public void onPrimaryOperationComplete( ActionListener listener ) { assert replicaRequest.primaryRefreshResult.refreshed() : "primary has not refreshed"; - boolean fastRefresh = IndexSettings.INDEX_FAST_REFRESH_SETTING.get( - clusterService.state().metadata().index(indexShardRoutingTable.shardId().getIndex()).getSettings() + UnpromotableShardRefreshRequest unpromotableReplicaRequest = new UnpromotableShardRefreshRequest( + indexShardRoutingTable, + replicaRequest.primaryRefreshResult.primaryTerm(), + replicaRequest.primaryRefreshResult.generation(), + false + ); + transportService.sendRequest( + transportService.getLocalNode(), + TransportUnpromotableShardRefreshAction.NAME, + unpromotableReplicaRequest, + new ActionListenerResponseHandler<>(listener.safeMap(r -> null), in -> ActionResponse.Empty.INSTANCE, refreshExecutor) ); - - // Indices marked with fast refresh do not rely on refreshing the unpromotables - if (fastRefresh) { - listener.onResponse(null); - } else { - UnpromotableShardRefreshRequest unpromotableReplicaRequest = new UnpromotableShardRefreshRequest( - indexShardRoutingTable, - replicaRequest.primaryRefreshResult.primaryTerm(), - replicaRequest.primaryRefreshResult.generation(), - false - ); - transportService.sendRequest( - transportService.getLocalNode(), - TransportUnpromotableShardRefreshAction.NAME, - unpromotableReplicaRequest, - new ActionListenerResponseHandler<>(listener.safeMap(r -> null), in -> ActionResponse.Empty.INSTANCE, refreshExecutor) - ); - } } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java index 6106e620521f7..5bdecd10075e6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.core.Nullable; +import org.elasticsearch.features.NodeFeature; import java.util.ArrayList; import java.util.HashMap; @@ -21,6 +22,9 @@ public class IndexStats implements Iterable { + // feature was effectively reverted but we still need to keep this constant around + public static final NodeFeature 
REVERTED_TIER_CREATION_DATE = new NodeFeature("stats.tier_creation_date"); + private final String index; private final String uuid; diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/DockerUbiElasticsearchDistributionType.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsFeatures.java similarity index 55% rename from build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/DockerUbiElasticsearchDistributionType.java rename to server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsFeatures.java index aa19bf6d60c53..558343db1023a 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/DockerUbiElasticsearchDistributionType.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsFeatures.java @@ -7,21 +7,17 @@ * License v3.0 only", or the "Server Side Public License, v 1". */ -package org.elasticsearch.gradle.internal.distribution; +package org.elasticsearch.action.admin.indices.stats; -import org.elasticsearch.gradle.ElasticsearchDistributionType; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; -public class DockerUbiElasticsearchDistributionType implements ElasticsearchDistributionType { +import java.util.Set; - DockerUbiElasticsearchDistributionType() {} +public class IndicesStatsFeatures implements FeatureSpecification { @Override - public String getName() { - return "dockerUbi"; - } - - @Override - public boolean isDocker() { - return true; + public Set getFeatures() { + return Set.of(IndexStats.REVERTED_TIER_CREATION_DATE); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java index 65f7e1969ea69..91e0e7cbc1dff 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java @@ -54,7 +54,17 @@ public class IndicesStatsResponse extends ChunkedBroadcastResponse { IndicesStatsResponse(StreamInput in) throws IOException { super(in); shards = in.readArray(ShardStats::new, ShardStats[]::new); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_STATS_ADDITIONAL_FIELDS_REVERT)) { + indexHealthMap = in.readMap(ClusterHealthStatus::readFrom); + indexStateMap = in.readMap(IndexMetadata.State::readFrom); + } else if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_STATS_ADDITIONAL_FIELDS)) { + indexHealthMap = in.readMap(ClusterHealthStatus::readFrom); + indexStateMap = in.readMap(IndexMetadata.State::readFrom); + in.readMap(StreamInput::readStringCollectionAsList); // unused, reverted + in.readMap(StreamInput::readLong); // unused, reverted + } else if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { + // Between 8.1 and INDEX_STATS_ADDITIONAL_FIELDS, we had a different format for the response + // where we only had health and state available. 
indexHealthMap = in.readMap(ClusterHealthStatus::readFrom); indexStateMap = in.readMap(IndexMetadata.State::readFrom); } else { @@ -174,7 +184,15 @@ public CommonStats getPrimaries() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeArray(shards); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_STATS_ADDITIONAL_FIELDS_REVERT)) { + out.writeMap(indexHealthMap, StreamOutput::writeWriteable); + out.writeMap(indexStateMap, StreamOutput::writeWriteable); + } else if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_STATS_ADDITIONAL_FIELDS)) { + out.writeMap(indexHealthMap, StreamOutput::writeWriteable); + out.writeMap(indexStateMap, StreamOutput::writeWriteable); + out.writeMap(Map.of(), StreamOutput::writeStringCollection); + out.writeMap(Map.of(), StreamOutput::writeLong); + } else if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { out.writeMap(indexHealthMap, StreamOutput::writeWriteable); out.writeMap(indexStateMap, StreamOutput::writeWriteable); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index ce3e189149451..ad1fda2534fab 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -42,6 +42,7 @@ import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; @@ -543,7 +544,8 @@ private IndexDocFailureStoreStatus processFailure(BulkItemRequest bulkItemReques var isFailureStoreRequest = isFailureStoreRequest(docWriteRequest); if (isFailureStoreRequest == false && failureStoreCandidate.isFailureStoreEnabled() - && error instanceof VersionConflictEngineException == false) { + && error instanceof VersionConflictEngineException == false + && error instanceof EsRejectedExecutionException == false) { // Prepare the data stream failure store if necessary maybeMarkFailureStoreForRollover(failureStoreCandidate); @@ -563,8 +565,8 @@ private IndexDocFailureStoreStatus processFailure(BulkItemRequest bulkItemReques } } else { // If we can't redirect to a failure store (because either the data stream doesn't have the failure store enabled - // or this request was already targeting a failure store), or this was a version conflict we increment the - // rejected counter. + // or this request was already targeting a failure store), or this was an error that is not eligible for the failure store + // such as a version conflict or a load rejection we increment the rejected counter. failureStoreMetrics.incrementRejected( bulkItemRequest.index(), errorType, diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/ReindexDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/ReindexDataStreamAction.java new file mode 100644 index 0000000000000..814c512c43bec --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/datastreams/ReindexDataStreamAction.java @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
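The INDEX_STATS_ADDITIONAL_FIELDS / ..._REVERT dance in IndicesStatsResponse above is the standard way to retire a field that shipped briefly: peers whose version falls inside the reverted window still expect the extra maps on the wire, so the writer sends empty placeholders and the reader drains whatever arrives. A hypothetical sketch of that shape, with readMap/writeMap reduced to plain counts:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class RevertedFieldShim {
    // write side: only peers inside [added, reverted) get the placeholder payload
    static void writeExtras(DataOutputStream out, int peerVersion, int added, int reverted) throws IOException {
        if (peerVersion >= added && peerVersion < reverted) {
            out.writeInt(0); // empty map #1: the field is reverted but the slot must exist
            out.writeInt(0); // empty map #2
        }
    }

    // read side: drain and discard whatever the peer sent for the reverted slots
    static void readExtras(DataInputStream in, int peerVersion, int added, int reverted) throws IOException {
        if (peerVersion >= added && peerVersion < reverted) {
            in.readInt(); // unused, reverted (the real code reads the full maps and drops them)
            in.readInt();
        }
    }
}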
under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.action.datastreams; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public class ReindexDataStreamAction extends ActionType { + + public static final ReindexDataStreamAction INSTANCE = new ReindexDataStreamAction(); + public static final String NAME = "indices:admin/data_stream/reindex"; + + public ReindexDataStreamAction() { + super(NAME); + } + + public static class ReindexDataStreamResponse extends ActionResponse implements ToXContentObject { + private final String taskId; + + public ReindexDataStreamResponse(String taskId) { + super(); + this.taskId = taskId; + } + + public ReindexDataStreamResponse(StreamInput in) throws IOException { + super(in); + this.taskId = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(taskId); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("task", getTaskId()); + builder.endObject(); + return builder; + } + + public String getTaskId() { + return taskId; + } + + @Override + public int hashCode() { + return Objects.hashCode(taskId); + } + + @Override + public boolean equals(Object other) { + return other instanceof ReindexDataStreamResponse && taskId.equals(((ReindexDataStreamResponse) other).taskId); + } + + } + + public static class ReindexDataStreamRequest extends ActionRequest { + private final String sourceDataStream; + + public ReindexDataStreamRequest(String sourceDataStream) { + super(); + this.sourceDataStream = sourceDataStream; + } + + public ReindexDataStreamRequest(StreamInput in) throws IOException { + super(in); + this.sourceDataStream = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(sourceDataStream); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public boolean getShouldStoreResult() { + return true; // do not wait_for_completion + } + + public String getSourceDataStream() { + return sourceDataStream; + } + + @Override + public int hashCode() { + return Objects.hashCode(sourceDataStream); + } + + @Override + public boolean equals(Object other) { + return other instanceof ReindexDataStreamRequest + && sourceDataStream.equals(((ReindexDataStreamRequest) other).sourceDataStream); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java index 9e535344c9589..a2c7c8664e81a 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java +++ 
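ReindexDataStreamRequest and ReindexDataStreamResponse above follow the usual wire contract: the StreamInput constructor must read exactly what writeTo wrote, in the same order. A self-contained analogue of that symmetry using java.io streams in place of StreamInput/StreamOutput:

import java.io.*;

record ReindexResponseSketch(String taskId) {
    static ReindexResponseSketch readFrom(DataInputStream in) throws IOException {
        return new ReindexResponseSketch(in.readUTF()); // mirrors writeTo exactly, field for field
    }

    void writeTo(DataOutputStream out) throws IOException {
        out.writeUTF(taskId);
    }

    public static void main(String[] args) throws IOException {
        var bytes = new ByteArrayOutputStream();
        new ReindexResponseSketch("node:123").writeTo(new DataOutputStream(bytes));
        var back = readFrom(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        assert back.taskId().equals("node:123");
    }
}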
b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -28,9 +28,9 @@ import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.routing.PlainShardIterator; import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.TimeValue; @@ -109,10 +109,7 @@ protected ShardIterator shards(ClusterState state, InternalRequest request) { if (iterator == null) { return null; } - return new PlainShardIterator( - iterator.shardId(), - iterator.getShardRoutings().stream().filter(shardRouting -> OperationRouting.canSearchShard(shardRouting, state)).toList() - ); + return new PlainShardIterator(iterator.shardId(), iterator.getShardRoutings().stream().filter(ShardRouting::isSearchable).toList()); } @Override @@ -126,14 +123,11 @@ protected void asyncShardOperation(GetRequest request, ShardId shardId, ActionLi IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.getShard(shardId.id()); if (indexShard.routingEntry().isPromotableToPrimary() == false) { - // TODO: Re-evaluate assertion (ES-8227) - // assert indexShard.indexSettings().isFastRefresh() == false - // : "a search shard should not receive a TransportGetAction for an index with fast refresh"; handleGetOnUnpromotableShard(request, indexShard, listener); return; } - assert DiscoveryNode.isStateless(clusterService.getSettings()) == false || indexShard.indexSettings().isFastRefresh() - : "in Stateless a promotable to primary shard can receive a TransportGetAction only if an index has the fast refresh setting"; + assert DiscoveryNode.isStateless(clusterService.getSettings()) == false + : "in Stateless a promotable to primary shard should not receive a TransportGetAction"; if (request.realtime()) { // we are not tied to a refresh cycle here anyway asyncGet(request, shardId, listener); } else { diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java index 34b3ae50e0b51..0fa770df8e4ef 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java @@ -19,6 +19,7 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.indices.refresh.TransportShardRefreshAction; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.action.support.replication.BasicReplicationRequest; import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; @@ -27,9 +28,9 @@ import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.routing.PlainShardIterator; import org.elasticsearch.cluster.routing.ShardIterator; +import 
org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.TimeValue; @@ -112,10 +113,7 @@ protected ShardIterator shards(ClusterState state, InternalRequest request) { if (iterator == null) { return null; } - return new PlainShardIterator( - iterator.shardId(), - iterator.getShardRoutings().stream().filter(shardRouting -> OperationRouting.canSearchShard(shardRouting, state)).toList() - ); + return new PlainShardIterator(iterator.shardId(), iterator.getShardRoutings().stream().filter(ShardRouting::isSearchable).toList()); } @Override @@ -124,15 +122,11 @@ protected void asyncShardOperation(MultiGetShardRequest request, ShardId shardId IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.getShard(shardId.id()); if (indexShard.routingEntry().isPromotableToPrimary() == false) { - // TODO: Re-evaluate assertion (ES-8227) - // assert indexShard.indexSettings().isFastRefresh() == false - // : "a search shard should not receive a TransportShardMultiGetAction for an index with fast refresh"; handleMultiGetOnUnpromotableShard(request, indexShard, listener); return; } - assert DiscoveryNode.isStateless(clusterService.getSettings()) == false || indexShard.indexSettings().isFastRefresh() - : "in Stateless a promotable to primary shard can receive a TransportShardMultiGetAction only if an index has " - + "the fast refresh setting"; + assert DiscoveryNode.isStateless(clusterService.getSettings()) == false + : "in Stateless a promotable to primary shard should not receive a TransportShardMultiGetAction"; if (request.realtime()) { // we are not tied to a refresh cycle here anyway asyncShardMultiGet(request, shardId, listener); } else { @@ -282,15 +276,15 @@ private void tryShardMultiGetFromTranslog( } else { assert r.segmentGeneration() > -1L; assert r.primaryTerm() > Engine.UNKNOWN_PRIMARY_TERM; - indexShard.waitForPrimaryTermAndGeneration( - r.primaryTerm(), - r.segmentGeneration(), + final ActionListener termAndGenerationListener = ContextPreservingActionListener.wrapPreservingContext( listener.delegateFailureAndWrap( (ll, aLong) -> getExecutor(request, shardId).execute( ActionRunnable.supply(ll, () -> handleLocalGets(request, r.multiGetShardResponse(), shardId)) ) - ) + ), + threadPool.getThreadContext() ); + indexShard.waitForPrimaryTermAndGeneration(r.primaryTerm(), r.segmentGeneration(), termAndGenerationListener); } } }), TransportShardMultiGetFomTranslogAction.Response::new, getExecutor(request, shardId)) diff --git a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java index 3ed1dfef50053..760b87af49a78 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java @@ -80,7 +80,7 @@ public RestStatus status() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); for (PipelineConfiguration pipeline : pipelines) { - builder.field(pipeline.getId(), summary ? Map.of() : pipeline.getConfigAsMap()); + builder.field(pipeline.getId(), summary ? 
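Wrapping the waitForPrimaryTermAndGeneration listener in ContextPreservingActionListener above captures the caller's thread context at wrap time and restores it around the callbacks, since the wait may complete on a different thread. A simplified stand-in where the "context" is just a ThreadLocal string:

final class ContextPreserving {
    interface Listener<T> { void onResponse(T r); void onFailure(Exception e); }

    static <T> Listener<T> preserving(Listener<T> delegate, ThreadLocal<String> ctx) {
        final String captured = ctx.get(); // capture the caller's context at wrap time
        return new Listener<T>() {
            @Override public void onResponse(T r) {
                String prior = ctx.get();
                ctx.set(captured); // restore the caller's context around the callback
                try { delegate.onResponse(r); } finally { ctx.set(prior); }
            }
            @Override public void onFailure(Exception e) {
                String prior = ctx.get();
                ctx.set(captured);
                try { delegate.onFailure(e); } finally { ctx.set(prior); }
            }
        };
    }
}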
Map.of() : pipeline.getConfig()); } builder.endObject(); return builder; diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java index 9cfc441490859..d6a2d81fdb7d3 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; @@ -41,19 +42,20 @@ public class SimulatePipelineRequest extends ActionRequest implements ToXContent private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(SimulatePipelineRequest.class); private String id; private boolean verbose; - private final BytesReference source; + private final ReleasableBytesReference source; private final XContentType xContentType; private RestApiVersion restApiVersion; /** * Creates a new request with the given source and its content type */ - public SimulatePipelineRequest(BytesReference source, XContentType xContentType) { + public SimulatePipelineRequest(ReleasableBytesReference source, XContentType xContentType) { this(source, xContentType, RestApiVersion.current()); } - public SimulatePipelineRequest(BytesReference source, XContentType xContentType, RestApiVersion restApiVersion) { + public SimulatePipelineRequest(ReleasableBytesReference source, XContentType xContentType, RestApiVersion restApiVersion) { this.source = Objects.requireNonNull(source); + assert source.hasReferences(); this.xContentType = Objects.requireNonNull(xContentType); this.restApiVersion = restApiVersion; } @@ -62,7 +64,7 @@ public SimulatePipelineRequest(BytesReference source, XContentType xContentType, super(in); id = in.readOptionalString(); verbose = in.readBoolean(); - source = in.readBytesReference(); + source = in.readReleasableBytesReference(); xContentType = in.readEnum(XContentType.class); } @@ -88,6 +90,7 @@ public void setVerbose(boolean verbose) { } public BytesReference getSource() { + assert source.hasReferences(); return source; } @@ -250,4 +253,24 @@ private static List parseDocs(Map config, RestAp public RestApiVersion getRestApiVersion() { return restApiVersion; } + + @Override + public final void incRef() { + source.incRef(); + } + + @Override + public final boolean tryIncRef() { + return source.tryIncRef(); + } + + @Override + public final boolean decRef() { + return source.decRef(); + } + + @Override + public final boolean hasReferences() { + return source.hasReferences(); + } } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequestBuilder.java index 05e30685c6a9b..931b86d15e24b 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequestBuilder.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; import 
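Making SimulatePipelineRequest ref-counted above is done purely by delegation: every incRef/decRef is forwarded to the pooled ReleasableBytesReference source, so the request stays valid exactly as long as its bytes do. A minimal stand-in of that delegation:

import java.util.concurrent.atomic.AtomicInteger;

final class RefCountedRequestSketch {
    // stand-in for ReleasableBytesReference: counted references over a pooled buffer
    static final class Source {
        private final AtomicInteger refs = new AtomicInteger(1);
        boolean tryIncRef() {
            int n;
            do {
                n = refs.get();
                if (n == 0) return false; // already fully released
            } while (refs.compareAndSet(n, n + 1) == false);
            return true;
        }
        boolean decRef() { return refs.decrementAndGet() == 0; } // true when the buffer is freed
        boolean hasReferences() { return refs.get() > 0; }
    }

    private final Source source = new Source();

    // the request exposes the RefCounted surface by forwarding to its source
    public boolean tryIncRef() { return source.tryIncRef(); }
    public boolean decRef() { return source.decRef(); }
    public boolean hasReferences() { return source.hasReferences(); }
}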
org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.xcontent.XContentType; public class SimulatePipelineRequestBuilder extends ActionRequestBuilder { @@ -20,7 +21,7 @@ public class SimulatePipelineRequestBuilder extends ActionRequestBuilder(shard, shardIndex) { + var shardListener = new SearchActionListener(shard, shardIndex) { @Override public void innerOnResponse(Result result) { try { @@ -315,7 +315,15 @@ public void onFailure(Exception e) { releasable.close(); onShardFailure(shardIndex, shard, shardIt, e); } - }); + }; + final Transport.Connection connection; + try { + connection = getConnection(shard.getClusterAlias(), shard.getNodeId()); + } catch (Exception e) { + shardListener.onFailure(e); + return; + } + executePhaseOnShard(shardIt, connection, shardListener); } private void failOnUnavailable(int shardIndex, SearchShardIterator shardIt) { @@ -327,12 +335,12 @@ private void failOnUnavailable(int shardIndex, SearchShardIterator shardIt) { /** * Sends the request to the actual shard. * @param shardIt the shards iterator - * @param shard the shard routing to send the request for + * @param connection to node that the shard is located on * @param listener the listener to notify on response */ protected abstract void executePhaseOnShard( SearchShardIterator shardIt, - SearchShardTarget shard, + Transport.Connection connection, SearchActionListener listener ); diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java index c4aea73cc6141..eaf62d1e57e66 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java @@ -58,7 +58,7 @@ * sort them according to the provided order. This can be useful for instance to ensure that shards that contain recent * data are executed first when sorting by descending timestamp. 
*/ -final class CanMatchPreFilterSearchPhase extends SearchPhase { +final class CanMatchPreFilterSearchPhase { private final Logger logger; private final SearchRequest request; @@ -92,7 +92,6 @@ final class CanMatchPreFilterSearchPhase extends SearchPhase { CoordinatorRewriteContextProvider coordinatorRewriteContextProvider, ActionListener> listener ) { - super("can_match"); this.logger = logger; this.searchTransportService = searchTransportService; this.nodeIdToConnection = nodeIdToConnection; @@ -128,12 +127,6 @@ private static boolean assertSearchCoordinationThread() { return ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SEARCH_COORDINATION); } - @Override - public void run() { - assert assertSearchCoordinationThread(); - runCoordinatorRewritePhase(); - } - // tries to pre-filter shards based on information that's available to the coordinator // without having to reach out to the actual shards private void runCoordinatorRewritePhase() { @@ -189,7 +182,7 @@ private void consumeResult(boolean canMatch, ShardSearchRequest request) { private void checkNoMissingShards(GroupShardsIterator shards) { assert assertSearchCoordinationThread(); - doCheckNoMissingShards(getName(), request, shards); + SearchPhase.doCheckNoMissingShards("can_match", request, shards, SearchPhase::makeMissingShardsError); } private Map> groupByNode(GroupShardsIterator shards) { @@ -318,7 +311,7 @@ public boolean isForceExecution() { @Override public void onFailure(Exception e) { if (logger.isDebugEnabled()) { - logger.debug(() -> format("Failed to execute [%s] while running [%s] phase", request, getName()), e); + logger.debug(() -> format("Failed to execute [%s] while running [can_match] phase", request), e); } onPhaseFailure("round", e); } @@ -370,7 +363,6 @@ public CanMatchNodeRequest.Shard buildShardLevelRequest(SearchShardIterator shar ); } - @Override public void start() { if (getNumShards() == 0) { finishPhase(); @@ -381,20 +373,21 @@ public void start() { @Override public void onFailure(Exception e) { if (logger.isDebugEnabled()) { - logger.debug(() -> format("Failed to execute [%s] while running [%s] phase", request, getName()), e); + logger.debug(() -> format("Failed to execute [%s] while running [can_match] phase", request), e); } onPhaseFailure("start", e); } @Override protected void doRun() { - CanMatchPreFilterSearchPhase.this.run(); + assert assertSearchCoordinationThread(); + runCoordinatorRewritePhase(); } }); } public void onPhaseFailure(String msg, Exception cause) { - listener.onFailure(new SearchPhaseExecutionException(getName(), msg, cause, ShardSearchFailure.EMPTY_ARRAY)); + listener.onFailure(new SearchPhaseExecutionException("can_match", msg, cause, ShardSearchFailure.EMPTY_ARRAY)); } public Transport.Connection getConnection(SendingTarget sendingTarget) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 69ca1569a7c07..25d59a06664da 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -84,16 +84,9 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction @Override protected void executePhaseOnShard( final SearchShardIterator shardIt, - final SearchShardTarget shard, + final Transport.Connection connection, final SearchActionListener listener ) { - final 
Transport.Connection connection; - try { - connection = getConnection(shard.getClusterAlias(), shard.getNodeId()); - } catch (Exception e) { - listener.onFailure(e); - return; - } getSearchTransport().sendExecuteDfs(connection, buildShardSearchRequest(shardIt, listener.requestIndex), getTask(), listener); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java index e4fef357cb4e9..986f7210c0d1b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java @@ -15,8 +15,8 @@ import org.elasticsearch.transport.Transport; import java.io.IOException; -import java.io.UncheckedIOException; import java.util.Objects; +import java.util.function.Function; /** * Base class for all individual search phases like collecting distributed frequencies, fetching documents, querying shards. @@ -35,21 +35,26 @@ public String getName() { return name; } - public void start() { - try { - run(); - } catch (IOException e) { - throw new UncheckedIOException(e); - } + protected String missingShardsErrorMessage(StringBuilder missingShards) { + return makeMissingShardsError(missingShards); } - protected String missingShardsErrorMessage(StringBuilder missingShards) { + protected static String makeMissingShardsError(StringBuilder missingShards) { return "Search rejected due to missing shards [" + missingShards + "]. Consider using `allow_partial_search_results` setting to bypass this error."; } protected void doCheckNoMissingShards(String phaseName, SearchRequest request, GroupShardsIterator shardsIts) { + doCheckNoMissingShards(phaseName, request, shardsIts, this::missingShardsErrorMessage); + } + + protected static void doCheckNoMissingShards( + String phaseName, + SearchRequest request, + GroupShardsIterator shardsIts, + Function makeErrorMessage + ) { assert request.allowPartialSearchResults() != null : "SearchRequest missing setting for allowPartialSearchResults"; if (request.allowPartialSearchResults() == false) { final StringBuilder missingShards = new StringBuilder(); @@ -65,7 +70,7 @@ protected void doCheckNoMissingShards(String phaseName, SearchRequest request, G } if (missingShards.isEmpty() == false) { // Status red - shard is missing all copies and would produce partial results for an index search - final String msg = missingShardsErrorMessage(missingShards); + final String msg = makeErrorMessage.apply(missingShards); throw new SearchPhaseExecutionException(phaseName, msg, null, ShardSearchFailure.EMPTY_ARRAY); } } @@ -74,7 +79,7 @@ protected void doCheckNoMissingShards(String phaseName, SearchRequest request, G /** * Releases shard targets that are not used in the docsIdsToLoad. 
*/ - protected void releaseIrrelevantSearchContext(SearchPhaseResult searchPhaseResult, AbstractSearchAsyncAction context) { + protected static void releaseIrrelevantSearchContext(SearchPhaseResult searchPhaseResult, AbstractSearchAsyncAction context) { // we only release search context that we did not fetch from, if we are not scrolling // or using a PIT and if it has at least one hit that didn't make it to the global topDocs if (searchPhaseResult == null) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index 84e0e2adea612..f75b84abc2f0f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -91,16 +91,9 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction listener ) { - final Transport.Connection connection; - try { - connection = getConnection(shard.getClusterAlias(), shard.getNodeId()); - } catch (Exception e) { - listener.onFailure(e); - return; - } ShardSearchRequest request = rewriteShardSearchRequest(super.buildShardSearchRequest(shardIt, listener.requestIndex)); getSearchTransport().sendExecuteQuery(connection, request, getTask(), listener); } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index eee65134eae33..9e60eedbad6a2 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -35,7 +35,6 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; -import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ShardSearchContextId; @@ -148,7 +147,7 @@ private final class OpenPointInTimePhase implements TransportSearchAction.Search } @Override - public SearchPhase newSearchPhase( + public void runNewSearchPhase( SearchTask task, SearchRequest searchRequest, Executor executor, @@ -166,7 +165,7 @@ public SearchPhase newSearchPhase( // that is signaled to the local can match through the SearchShardIterator#prefiltered flag. Local shards do need to go // through the local can match phase. 
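[Editor's note, not part of the patch] The hunks below repeat a single refactoring that runs through this file, TransportSearchAction, and TransportSearchShardsAction: phase factories no longer return a SearchPhase for the caller to start; they start the phase themselves and return void. A minimal sketch of the two contracts, using hypothetical stand-in names (Phase, PhaseProvider) rather than the real Elasticsearch types:

    // Hypothetical stand-ins for SearchPhase / SearchPhaseProvider.
    final class Phase {
        void start() { /* kick off the asynchronous work */ }
    }

    final class PhaseProvider {
        // Before: every caller received the phase and had to remember to call start().
        Phase newSearchPhase() {
            return new Phase();
        }

        // After: the provider owns the lifecycle, so a phase can no longer be
        // constructed and then dropped without ever being started.
        void runNewSearchPhase() {
            new Phase().start();
        }
    }

This is why the diff replaces "return new CanMatchPreFilterSearchPhase(...)" with "new CanMatchPreFilterSearchPhase(...).start()" throughout.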
if (SearchService.canRewriteToMatchNone(searchRequest.source())) { - return new CanMatchPreFilterSearchPhase( + new CanMatchPreFilterSearchPhase( logger, searchTransportService, connectionLookup, @@ -180,7 +179,7 @@ public SearchPhase newSearchPhase( false, searchService.getCoordinatorRewriteContextProvider(timeProvider::absoluteStartMillis), listener.delegateFailureAndWrap( - (searchResponseActionListener, searchShardIterators) -> openPointInTimePhase( + (searchResponseActionListener, searchShardIterators) -> runOpenPointInTimePhase( task, searchRequest, executor, @@ -191,11 +190,11 @@ public SearchPhase newSearchPhase( aliasFilter, concreteIndexBoosts, clusters - ).start() + ) ) - ); + ).start(); } else { - return openPointInTimePhase( + runOpenPointInTimePhase( task, searchRequest, executor, @@ -210,7 +209,7 @@ public SearchPhase newSearchPhase( } } - SearchPhase openPointInTimePhase( + void runOpenPointInTimePhase( SearchTask task, SearchRequest searchRequest, Executor executor, @@ -224,7 +223,7 @@ SearchPhase openPointInTimePhase( ) { assert searchRequest.getMaxConcurrentShardRequests() == pitRequest.maxConcurrentShardRequests() : searchRequest.getMaxConcurrentShardRequests() + " != " + pitRequest.maxConcurrentShardRequests(); - return new AbstractSearchAsyncAction<>( + new AbstractSearchAsyncAction<>( actionName, logger, namedWriteableRegistry, @@ -243,7 +242,6 @@ SearchPhase openPointInTimePhase( searchRequest.getMaxConcurrentShardRequests(), clusters ) { - @Override protected String missingShardsErrorMessage(StringBuilder missingShards) { return "[open_point_in_time] action requires all shards to be available. Missing shards: [" + missingShards @@ -253,16 +251,9 @@ protected String missingShardsErrorMessage(StringBuilder missingShards) { @Override protected void executePhaseOnShard( SearchShardIterator shardIt, - SearchShardTarget shard, + Transport.Connection connection, SearchActionListener phaseListener ) { - final Transport.Connection connection; - try { - connection = connectionLookup.apply(shardIt.getClusterAlias(), shard.getNodeId()); - } catch (Exception e) { - phaseListener.onFailure(e); - return; - } transportService.sendChildRequest( connection, OPEN_SHARD_READER_CONTEXT_NAME, @@ -290,7 +281,7 @@ public void run() { boolean buildPointInTimeFromSearchResults() { return true; } - }; + }.start(); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 9aab5d005b1bb..5d1fb46a53cef 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -69,6 +69,7 @@ import org.elasticsearch.indices.ExecutorSelector; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.node.ResponseCollectorService; import org.elasticsearch.rest.action.search.SearchResponseMetrics; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; @@ -151,6 +152,7 @@ public class TransportSearchAction extends HandledTransportAction mergeShardsIterators( } interface SearchPhaseProvider { - SearchPhase newSearchPhase( + void runNewSearchPhase( SearchTask task, SearchRequest searchRequest, Executor executor, @@ -1438,7 +1442,7 @@ private class AsyncSearchActionProvider implements SearchPhaseProvider { } @Override - public SearchPhase 
newSearchPhase( + public void runNewSearchPhase( SearchTask task, SearchRequest searchRequest, Executor executor, @@ -1455,7 +1459,7 @@ public SearchPhase newSearchPhase( if (preFilter) { // only for aggs we need to contact shards even if there are no matches boolean requireAtLeastOneMatch = searchRequest.source() != null && searchRequest.source().aggregations() != null; - return new CanMatchPreFilterSearchPhase( + new CanMatchPreFilterSearchPhase( logger, searchTransportService, connectionLookup, @@ -1468,8 +1472,8 @@ public SearchPhase newSearchPhase( task, requireAtLeastOneMatch, searchService.getCoordinatorRewriteContextProvider(timeProvider::absoluteStartMillis), - listener.delegateFailureAndWrap( - (l, iters) -> newSearchPhase( + listener.delegateFailureAndWrap((l, iters) -> { + runNewSearchPhase( task, searchRequest, executor, @@ -1482,9 +1486,10 @@ public SearchPhase newSearchPhase( false, threadPool, clusters - ).start() - ) - ); + ); + }) + ).start(); + return; } // for synchronous CCS minimize_roundtrips=false, use the CCSSingleCoordinatorSearchProgressListener // (AsyncSearchTask will not return SearchProgressListener.NOOP, since it uses its own progress listener @@ -1505,7 +1510,7 @@ public SearchPhase newSearchPhase( ); boolean success = false; try { - final SearchPhase searchPhase; + final AbstractSearchAsyncAction searchPhase; if (searchRequest.searchType() == DFS_QUERY_THEN_FETCH) { searchPhase = new SearchDfsQueryThenFetchAsyncAction( logger, @@ -1547,7 +1552,7 @@ public SearchPhase newSearchPhase( ); } success = true; - return searchPhase; + searchPhase.start(); } finally { if (success == false) { queryResultConsumer.close(); @@ -1840,7 +1845,7 @@ List getLocalShardsIterator( concreteIndices, routingMap, searchRequest.preference(), - searchService.getResponseCollectorService(), + responseCollectorService, searchTransportService.getPendingSearchRequests() ); final Map originalIndices = buildPerIndexOriginalIndices( diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java index f418b5617b2a1..d8b57972d604f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java @@ -146,7 +146,7 @@ public void searchShards(Task task, SearchShardsRequest searchShardsRequest, Act if (SearchService.canRewriteToMatchNone(searchRequest.source()) == false) { delegate.onResponse(new SearchShardsResponse(toGroups(shardIts), clusterState.nodes().getAllNodes(), aliasFilters)); } else { - var canMatchPhase = new CanMatchPreFilterSearchPhase(logger, searchTransportService, (clusterAlias, node) -> { + new CanMatchPreFilterSearchPhase(logger, searchTransportService, (clusterAlias, node) -> { assert Objects.equals(clusterAlias, searchShardsRequest.clusterAlias()); return transportService.getConnection(clusterState.nodes().get(node)); }, @@ -160,8 +160,7 @@ public void searchShards(Task task, SearchShardsRequest searchShardsRequest, Act false, searchService.getCoordinatorRewriteContextProvider(timeProvider::absoluteStartMillis), delegate.map(its -> new SearchShardsResponse(toGroups(its), clusterState.nodes().getAllNodes(), aliasFilters)) - ); - canMatchPhase.start(); + ).start(); } }) ); diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/PostWriteRefresh.java 
b/server/src/main/java/org/elasticsearch/action/support/replication/PostWriteRefresh.java index 683c3589c893d..7414aeeb2c405 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/PostWriteRefresh.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/PostWriteRefresh.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.translog.Translog; @@ -53,9 +52,7 @@ public void refreshShard( case WAIT_UNTIL -> waitUntil(indexShard, location, new ActionListener<>() { @Override public void onResponse(Boolean forced) { - // Fast refresh indices do not depend on the unpromotables being refreshed - boolean fastRefresh = IndexSettings.INDEX_FAST_REFRESH_SETTING.get(indexShard.indexSettings().getSettings()); - if (location != null && (indexShard.routingEntry().isSearchable() == false && fastRefresh == false)) { + if (location != null && indexShard.routingEntry().isSearchable() == false) { refreshUnpromotables(indexShard, location, listener, forced, postWriteRefreshTimeout); } else { listener.onResponse(forced); @@ -68,9 +65,7 @@ public void onFailure(Exception e) { } }); case IMMEDIATE -> immediate(indexShard, listener.delegateFailureAndWrap((l, r) -> { - // Fast refresh indices do not depend on the unpromotables being refreshed - boolean fastRefresh = IndexSettings.INDEX_FAST_REFRESH_SETTING.get(indexShard.indexSettings().getSettings()); - if (indexShard.getReplicationGroup().getRoutingTable().unpromotableShards().size() > 0 && fastRefresh == false) { + if (indexShard.getReplicationGroup().getRoutingTable().unpromotableShards().size() > 0) { sendUnpromotableRequests(indexShard, r.generation(), true, l, postWriteRefreshTimeout); } else { l.onResponse(true); diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java index 530f22f4bed53..debc64914a171 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java @@ -210,7 +210,12 @@ public void writeThin(StreamOutput out) throws IOException { @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new ReplicationTask(id, type, action, getDescription(), parentTaskId, headers); + return new ReplicationTask(id, type, action, "", parentTaskId, headers) { + @Override + public String getDescription() { + return ReplicationRequest.this.getDescription(); + } + }; } @Override diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 699198a8e22c2..56d185645e149 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -17,6 +17,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.env.Environment; import org.elasticsearch.node.NodeValidationException; +import org.elasticsearch.plugins.PluginsLoader; import java.io.PrintStream; @@ -42,6 +43,9 @@ class Bootstrap { // the loaded settings for the node, not valid 
until after phase 2 of initialization private final SetOnce nodeEnv = new SetOnce<>(); + // loads information about plugins required for entitlements in phase 2, used by plugins service in phase 3 + private final SetOnce pluginsLoader = new SetOnce<>(); + Bootstrap(PrintStream out, PrintStream err, ServerArgs args) { this.out = out; this.err = err; @@ -72,6 +76,14 @@ Environment environment() { return nodeEnv.get(); } + void setPluginsLoader(PluginsLoader pluginsLoader) { + this.pluginsLoader.set(pluginsLoader); + } + + PluginsLoader pluginsLoader() { + return pluginsLoader.get(); + } + void exitWithNodeValidationException(NodeValidationException e) { Logger logger = LogManager.getLogger(Elasticsearch.class); logger.error("node validation exception\n{}", e.getMessage()); diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index 2a83f749e7d33..c06ea9305aef8 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -30,6 +30,7 @@ import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.core.Tuple; import org.elasticsearch.entitlement.bootstrap.EntitlementBootstrap; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexVersion; @@ -41,6 +42,9 @@ import org.elasticsearch.nativeaccess.NativeAccess; import org.elasticsearch.node.Node; import org.elasticsearch.node.NodeValidationException; +import org.elasticsearch.plugins.PluginBundle; +import org.elasticsearch.plugins.PluginsLoader; +import org.elasticsearch.plugins.PluginsUtils; import java.io.IOException; import java.io.InputStream; @@ -50,8 +54,10 @@ import java.nio.file.Path; import java.security.Permission; import java.security.Security; +import java.util.ArrayList; import java.util.List; import java.util.Objects; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -199,12 +205,27 @@ private static void initPhase2(Bootstrap bootstrap) throws IOException { VectorUtil.class ); + // load the plugin Java modules and layers now for use in entitlements + var pluginsLoader = PluginsLoader.createPluginsLoader(nodeEnv.modulesFile(), nodeEnv.pluginsFile()); + bootstrap.setPluginsLoader(pluginsLoader); + if (Boolean.parseBoolean(System.getProperty("es.entitlements.enabled"))) { - logger.info("Bootstrapping Entitlements"); - EntitlementBootstrap.bootstrap(); + LogManager.getLogger(Elasticsearch.class).info("Bootstrapping Entitlements"); + + List> pluginData = new ArrayList<>(); + Set moduleBundles = PluginsUtils.getModuleBundles(nodeEnv.modulesFile()); + for (PluginBundle moduleBundle : moduleBundles) { + pluginData.add(Tuple.tuple(moduleBundle.getDir(), moduleBundle.pluginDescriptor().isModular())); + } + Set pluginBundles = PluginsUtils.getPluginBundles(nodeEnv.pluginsFile()); + for (PluginBundle pluginBundle : pluginBundles) { + pluginData.add(Tuple.tuple(pluginBundle.getDir(), pluginBundle.pluginDescriptor().isModular())); + } + // TODO: add a functor to map module to plugin name + EntitlementBootstrap.bootstrap(pluginData, callerClass -> null); } else { // install SM after natives, shutdown hooks, etc. 
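[Editor's note, not part of the patch] In the Bootstrap hunk above, the PluginsLoader created in phase 2 is handed to phase 3 through Lucene's SetOnce, the same single-assignment holder already used for nodeEnv. A minimal sketch of the semantics being relied on, assuming org.apache.lucene.util.SetOnce is on the classpath as it is in the server module:

    import org.apache.lucene.util.SetOnce;

    final class SetOnceDemo {
        public static void main(String[] args) {
            SetOnce<String> holder = new SetOnce<>();
            // Unset until phase 2 runs; get() returns null rather than throwing.
            assert holder.get() == null;
            holder.set("plugins-loader");        // first assignment succeeds
            try {
                holder.set("something-else");    // second assignment is rejected
            } catch (SetOnce.AlreadySetException e) {
                // guarantees phase 3 sees exactly the value phase 2 stored
            }
        }
    }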
- logger.info("Bootstrapping java SecurityManager"); + LogManager.getLogger(Elasticsearch.class).info("Bootstrapping java SecurityManager"); org.elasticsearch.bootstrap.Security.configure( nodeEnv, SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(args.nodeSettings()), @@ -244,7 +265,7 @@ private static void ensureInitialized(Class... classes) { private static void initPhase3(Bootstrap bootstrap) throws IOException, NodeValidationException { checkLucene(); - Node node = new Node(bootstrap.environment()) { + Node node = new Node(bootstrap.environment(), bootstrap.pluginsLoader()) { @Override protected void validateNodeBeforeAcceptingRequests( final BootstrapContext context, diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java b/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java index 57b90454c7e8b..ad285cbd391cd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterFeatures.java @@ -95,7 +95,7 @@ public Set allNodeFeatures() { /** * {@code true} if {@code feature} is present on all nodes in the cluster. *
<p>
- * NOTE: This should not be used directly, as it does not read historical features. + * NOTE: This should not be used directly. * Please use {@link org.elasticsearch.features.FeatureService#clusterHasFeature} instead. */ @SuppressForbidden(reason = "directly reading cluster features") diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 046f4b6b0b251..c2da33f8f4135 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -139,7 +139,7 @@ public ClusterModule( this.clusterPlugins = clusterPlugins; this.deciderList = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins); this.allocationDeciders = new AllocationDeciders(deciderList); - var nodeAllocationStatsProvider = new NodeAllocationStatsProvider(writeLoadForecaster); + var nodeAllocationStatsProvider = new NodeAllocationStatsProvider(writeLoadForecaster, clusterService.getClusterSettings()); this.shardsAllocator = createShardsAllocator( settings, clusterService.getClusterSettings(), diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java index 4e47925d383c2..25c6a1ff5b67f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java +++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.block; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -21,6 +22,7 @@ import java.util.EnumSet; import java.util.Locale; import java.util.Objects; +import java.util.function.Predicate; public class ClusterBlock implements Writeable, ToXContentFragment { @@ -142,7 +144,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(id); out.writeOptionalString(uuid); out.writeString(description); - out.writeEnumSet(levels); + if (out.getTransportVersion().onOrAfter(TransportVersions.NEW_REFRESH_CLUSTER_BLOCK)) { + out.writeEnumSet(levels); + } else { + // do not send ClusterBlockLevel.REFRESH to old nodes + out.writeEnumSet(filterLevels(levels, level -> ClusterBlockLevel.REFRESH.equals(level) == false)); + } out.writeBoolean(retryable); out.writeBoolean(disableStatePersistence); RestStatus.writeTo(out, status); @@ -185,4 +192,19 @@ public int hashCode() { public boolean isAllowReleaseResources() { return allowReleaseResources; } + + static EnumSet filterLevels(EnumSet levels, Predicate predicate) { + assert levels != null; + int size = levels.size(); + if (size == 0 || (size == 1 && predicate.test(levels.iterator().next()))) { + return levels; + } + var filteredLevels = EnumSet.noneOf(ClusterBlockLevel.class); + for (ClusterBlockLevel level : levels) { + if (predicate.test(level)) { + filteredLevels.add(level); + } + } + return filteredLevels; + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java index f6330fb18e5e6..262044b091ac7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java +++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java @@ -15,7 +15,8 @@ public enum ClusterBlockLevel 
{ READ, WRITE, METADATA_READ, - METADATA_WRITE; + METADATA_WRITE, + REFRESH; public static final EnumSet ALL = EnumSet.allOf(ClusterBlockLevel.class); public static final EnumSet READ_WRITE = EnumSet.of(READ, WRITE); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java index 6841c6e49e3f1..62d3f5e5866bc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java @@ -467,7 +467,7 @@ public void handleCommit(ApplyCommitRequest applyCommit) { logger.debug( "handleCommit: ignored commit request due to term mismatch " + "(expected: [term {} version {}], actual: [term {} version {}])", - getLastAcceptedTerm(), + getCurrentTerm(), getLastAcceptedVersion(), applyCommit.getTerm(), applyCommit.getVersion() diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 8a4464f194fc5..35b6eb1852237 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -246,6 +246,7 @@ public Coordinator( this.joinValidationService = new JoinValidationService( settings, transportService, + namedWriteableRegistry, this::getStateForJoinValidationService, () -> getLastAcceptedState().metadata(), this.onJoinValidators diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java index 7de7fd4d92d1b..9d5d74fa24648 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java @@ -21,6 +21,8 @@ import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -106,6 +108,7 @@ public class JoinValidationService { public JoinValidationService( Settings settings, TransportService transportService, + NamedWriteableRegistry namedWriteableRegistry, Supplier clusterStateSupplier, Supplier metadataSupplier, Collection> joinValidators @@ -120,9 +123,9 @@ public JoinValidationService( transportService.registerRequestHandler( JoinValidationService.JOIN_VALIDATE_ACTION_NAME, this.responseExecutor, - ValidateJoinRequest::new, + BytesTransportRequest::new, (request, channel, task) -> { - final var remoteState = request.getOrReadState(); + final var remoteState = readClusterState(namedWriteableRegistry, request); final var remoteMetadata = remoteState.metadata(); final var localMetadata = metadataSupplier.get(); if (localMetadata.clusterUUIDCommitted() && localMetadata.clusterUUID().equals(remoteMetadata.clusterUUID()) == false) { @@ -145,6 +148,20 @@ public JoinValidationService( ); } + private static ClusterState readClusterState(NamedWriteableRegistry namedWriteableRegistry, BytesTransportRequest 
request) + throws IOException { + try ( + var bytesStreamInput = request.bytes().streamInput(); + var in = new NamedWriteableAwareStreamInput( + CompressorFactory.COMPRESSOR.threadLocalStreamInput(bytesStreamInput), + namedWriteableRegistry + ) + ) { + in.setTransportVersion(request.version()); + return ClusterState.readFrom(in, null); + } + } + public void validateJoin(DiscoveryNode discoveryNode, ActionListener listener) { // This node isn't in the cluster yet so ClusterState#getMinTransportVersion() doesn't apply, we must obtain a specific connection // so we can check its transport version to decide how to proceed. diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java deleted file mode 100644 index c81e4877196b3..0000000000000 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ -package org.elasticsearch.cluster.coordination; - -import org.elasticsearch.TransportVersion; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.common.CheckedSupplier; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.core.RefCounted; -import org.elasticsearch.transport.TransportRequest; - -import java.io.IOException; - -public class ValidateJoinRequest extends TransportRequest { - private final CheckedSupplier stateSupplier; - private final RefCounted refCounted; - - public ValidateJoinRequest(StreamInput in) throws IOException { - super(in); - // recent versions send a BytesTransportRequest containing a compressed representation of the state - final var bytes = in.readReleasableBytesReference(); - final var version = in.getTransportVersion(); - final var namedWriteableRegistry = in.namedWriteableRegistry(); - this.stateSupplier = () -> readCompressed(version, bytes, namedWriteableRegistry); - this.refCounted = bytes; - } - - private static ClusterState readCompressed( - TransportVersion version, - BytesReference bytes, - NamedWriteableRegistry namedWriteableRegistry - ) throws IOException { - try ( - var bytesStreamInput = bytes.streamInput(); - var in = new NamedWriteableAwareStreamInput( - CompressorFactory.COMPRESSOR.threadLocalStreamInput(bytesStreamInput), - namedWriteableRegistry - ) - ) { - in.setTransportVersion(version); - return ClusterState.readFrom(in, null); - } - } - - public ValidateJoinRequest(ClusterState state) { - this.stateSupplier = () -> state; - this.refCounted = null; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - stateSupplier.get().writeTo(out); - } - - public ClusterState getOrReadState() throws 
IOException { - return stateSupplier.get(); - } - - @Override - public void incRef() { - if (refCounted != null) { - refCounted.incRef(); - } - } - - @Override - public boolean tryIncRef() { - return refCounted == null || refCounted.tryIncRef(); - } - - @Override - public boolean decRef() { - return refCounted != null && refCounted.decRef(); - } - - @Override - public boolean hasReferences() { - return refCounted == null || refCounted.hasReferences(); - } -} diff --git a/server/src/main/java/org/elasticsearch/cluster/features/NodeFeaturesFixupListener.java b/server/src/main/java/org/elasticsearch/cluster/features/NodeFeaturesFixupListener.java deleted file mode 100644 index 4d9074be15695..0000000000000 --- a/server/src/main/java/org/elasticsearch/cluster/features/NodeFeaturesFixupListener.java +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.cluster.features; - -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.FailedNodeException; -import org.elasticsearch.action.admin.cluster.node.features.NodeFeatures; -import org.elasticsearch.action.admin.cluster.node.features.NodesFeaturesRequest; -import org.elasticsearch.action.admin.cluster.node.features.NodesFeaturesResponse; -import org.elasticsearch.action.admin.cluster.node.features.TransportNodesFeaturesAction; -import org.elasticsearch.client.internal.ClusterAdminClient; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterFeatures; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.ClusterStateTaskExecutor; -import org.elasticsearch.cluster.ClusterStateTaskListener; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.cluster.service.MasterServiceTaskQueue; -import org.elasticsearch.common.Priority; -import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.UpdateForV9; -import org.elasticsearch.logging.LogManager; -import org.elasticsearch.logging.Logger; -import org.elasticsearch.threadpool.Scheduler; -import org.elasticsearch.threadpool.ThreadPool; - -import java.util.Collections; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.Executor; -import java.util.stream.Collectors; - -@UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // this can be removed in v9 -public class NodeFeaturesFixupListener implements ClusterStateListener { - - private static final Logger logger = LogManager.getLogger(NodeFeaturesFixupListener.class); - - private static final TimeValue RETRY_TIME = TimeValue.timeValueSeconds(30); - - private final MasterServiceTaskQueue taskQueue; - private final ClusterAdminClient client; - private final Scheduler scheduler; - private final Executor executor; - private final Set pendingNodes = 
Collections.synchronizedSet(new HashSet<>()); - - public NodeFeaturesFixupListener(ClusterService service, ClusterAdminClient client, ThreadPool threadPool) { - // there tends to be a lot of state operations on an upgrade - this one is not time-critical, - // so use LOW priority. It just needs to be run at some point after upgrade. - this( - service.createTaskQueue("fix-node-features", Priority.LOW, new NodesFeaturesUpdater()), - client, - threadPool, - threadPool.executor(ThreadPool.Names.CLUSTER_COORDINATION) - ); - } - - NodeFeaturesFixupListener( - MasterServiceTaskQueue taskQueue, - ClusterAdminClient client, - Scheduler scheduler, - Executor executor - ) { - this.taskQueue = taskQueue; - this.client = client; - this.scheduler = scheduler; - this.executor = executor; - } - - class NodesFeaturesTask implements ClusterStateTaskListener { - private final Map> results; - private final int retryNum; - - NodesFeaturesTask(Map> results, int retryNum) { - this.results = results; - this.retryNum = retryNum; - } - - @Override - public void onFailure(Exception e) { - logger.error("Could not apply features for nodes {} to cluster state", results.keySet(), e); - scheduleRetry(results.keySet(), retryNum); - } - - public Map> results() { - return results; - } - } - - static class NodesFeaturesUpdater implements ClusterStateTaskExecutor { - @Override - public ClusterState execute(BatchExecutionContext context) { - ClusterState.Builder builder = ClusterState.builder(context.initialState()); - var existingFeatures = builder.nodeFeatures(); - - boolean modified = false; - for (var c : context.taskContexts()) { - for (var e : c.getTask().results().entrySet()) { - // double check there are still no features for the node - if (existingFeatures.getOrDefault(e.getKey(), Set.of()).isEmpty()) { - builder.putNodeFeatures(e.getKey(), e.getValue()); - modified = true; - } - } - c.success(() -> {}); - } - return modified ? builder.build() : context.initialState(); - } - } - - @Override - public void clusterChanged(ClusterChangedEvent event) { - if (event.nodesDelta().masterNodeChanged() && event.localNodeMaster()) { - /* - * Execute this if we have just become master. - * Check if there are any nodes that should have features in cluster state, but don't. - * This can happen if the master was upgraded from before 8.13, and one or more non-master nodes - * were already upgraded. They don't re-join the cluster with the new master, so never get their features - * (which the master now understands) added to cluster state. - * So we need to do a separate transport call to get the node features and add them to cluster state. - * We can't use features to determine when this should happen, as the features are incorrect. - * We also can't use transport version, as that is unreliable for upgrades - * from versions before 8.8 (see TransportVersionFixupListener). - * So the only thing we can use is release version. - * This is ok here, as Serverless will never hit this case, so the node feature fetch action will never be called on Serverless. - * This whole class will be removed in ES v9. 
- */ - ClusterFeatures nodeFeatures = event.state().clusterFeatures(); - Set queryNodes = event.state() - .nodes() - .stream() - .filter(n -> n.getVersion().onOrAfter(Version.V_8_15_0)) - .map(DiscoveryNode::getId) - .filter(n -> getNodeFeatures(nodeFeatures, n).isEmpty()) - .collect(Collectors.toSet()); - - if (queryNodes.isEmpty() == false) { - logger.debug("Fetching actual node features for nodes {}", queryNodes); - queryNodesFeatures(queryNodes, 0); - } - } - } - - @SuppressForbidden(reason = "Need to access a specific node's features") - private static Set getNodeFeatures(ClusterFeatures features, String nodeId) { - return features.nodeFeatures().getOrDefault(nodeId, Set.of()); - } - - private void scheduleRetry(Set nodes, int thisRetryNum) { - // just keep retrying until this succeeds - logger.debug("Scheduling retry {} for nodes {}", thisRetryNum + 1, nodes); - scheduler.schedule(() -> queryNodesFeatures(nodes, thisRetryNum + 1), RETRY_TIME, executor); - } - - private void queryNodesFeatures(Set nodes, int retryNum) { - // some might already be in-progress - Set outstandingNodes = Sets.newHashSetWithExpectedSize(nodes.size()); - synchronized (pendingNodes) { - for (String n : nodes) { - if (pendingNodes.add(n)) { - outstandingNodes.add(n); - } - } - } - if (outstandingNodes.isEmpty()) { - // all nodes already have in-progress requests - return; - } - - NodesFeaturesRequest request = new NodesFeaturesRequest(outstandingNodes.toArray(String[]::new)); - client.execute(TransportNodesFeaturesAction.TYPE, request, new ActionListener<>() { - @Override - public void onResponse(NodesFeaturesResponse response) { - pendingNodes.removeAll(outstandingNodes); - handleResponse(response, retryNum); - } - - @Override - public void onFailure(Exception e) { - pendingNodes.removeAll(outstandingNodes); - logger.warn("Could not read features for nodes {}", outstandingNodes, e); - scheduleRetry(outstandingNodes, retryNum); - } - }); - } - - private void handleResponse(NodesFeaturesResponse response, int retryNum) { - if (response.hasFailures()) { - Set failedNodes = new HashSet<>(); - for (FailedNodeException fne : response.failures()) { - logger.warn("Failed to read features from node {}", fne.nodeId(), fne); - failedNodes.add(fne.nodeId()); - } - scheduleRetry(failedNodes, retryNum); - } - // carry on and read what we can - - Map> results = response.getNodes() - .stream() - .collect(Collectors.toUnmodifiableMap(n -> n.getNode().getId(), NodeFeatures::nodeFeatures)); - - if (results.isEmpty() == false) { - taskQueue.submitTask("fix-node-features", new NodesFeaturesTask(results, retryNum), null); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java index fb8559b19d81d..de3343c1944c1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java @@ -37,7 +37,6 @@ import java.util.Objects; import java.util.Set; import java.util.TreeSet; -import java.util.function.Predicate; import java.util.regex.Pattern; import static java.lang.String.format; @@ -48,7 +47,6 @@ public final class DesiredNode implements Writeable, ToXContentObject, Comparable { public static final NodeFeature RANGE_FLOAT_PROCESSORS_SUPPORTED = new NodeFeature("desired_node.range_float_processors"); - public static final NodeFeature DOUBLE_PROCESSORS_SUPPORTED = new NodeFeature("desired_node.double_processors"); public static 
final NodeFeature DESIRED_NODE_VERSION_DEPRECATED = new NodeFeature("desired_node.version_deprecated"); public static final TransportVersion RANGE_FLOAT_PROCESSORS_SUPPORT_TRANSPORT_VERSION = TransportVersions.V_8_3_0; @@ -348,10 +346,6 @@ public Set getRoles() { return roles; } - public boolean clusterHasRequiredFeatures(Predicate clusterHasFeature) { - return (processorsRange == null && processors.hasDecimals() == false) || clusterHasFeature.test(RANGE_FLOAT_PROCESSORS_SUPPORTED); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 6456240c2317e..681ea84513088 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -140,6 +140,15 @@ public class IndexMetadata implements Diffable, ToXContentFragmen RestStatus.TOO_MANY_REQUESTS, EnumSet.of(ClusterBlockLevel.WRITE) ); + public static final ClusterBlock INDEX_REFRESH_BLOCK = new ClusterBlock( + 14, + "index refresh blocked, waiting for shard(s) to be started", + true, + false, + false, + RestStatus.REQUEST_TIMEOUT, + EnumSet.of(ClusterBlockLevel.REFRESH) + ); // 'event.ingested' (part of Elastic Common Schema) range is tracked in cluster state, along with @timestamp public static final String EVENT_INGESTED_FIELD_NAME = "event.ingested"; @@ -1776,7 +1785,7 @@ public static IndexMetadata readFrom(StreamInput in, @Nullable Function dataStreamNames(ClusterState state, IndicesOptions options, getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate() ); - final Collection expressions = resolveExpressions(context, indexExpressions); + final Collection expressions = resolveExpressionsToResources(context, indexExpressions); return expressions.stream() .map(x -> state.metadata().getIndicesLookup().get(x)) .filter(Objects::nonNull) @@ -220,7 +227,7 @@ public IndexAbstraction resolveWriteIndexAbstraction(ClusterState state, DocWrit getNetNewSystemIndexPredicate() ); - final Collection expressions = resolveExpressions(context, request.index()); + final Collection expressions = resolveExpressionsToResources(context, request.index()); if (expressions.size() == 1) { IndexAbstraction ia = state.metadata().getIndicesLookup().get(expressions.iterator().next()); @@ -236,7 +243,7 @@ public IndexAbstraction resolveWriteIndexAbstraction(ClusterState state, DocWrit ); } } - checkSystemIndexAccess(context, Set.of(ia.getWriteIndex())); + SystemResourceAccess.checkSystemIndexAccess(context, threadContext, ia.getWriteIndex()); return ia; } else { throw new IllegalArgumentException( @@ -245,30 +252,110 @@ public IndexAbstraction resolveWriteIndexAbstraction(ClusterState state, DocWrit } } - protected static Collection resolveExpressions(Context context, String... expressions) { - if (context.getOptions().expandWildcardExpressions() == false) { + /** + * Resolve the expression to the set of indices, aliases, and, optionally, data streams that the expression matches. + * If {@param preserveDataStreams} is {@code true}, data streams that are covered by the wildcards from the + * {@param expressions} are returned as-is, without expanding them further to their respective backing indices. + */ + protected static Collection resolveExpressionsToResources(Context context, String... 
expressions) { + // If we do not expand wildcards, then empty or _all expression result in an empty list + boolean expandWildcards = context.getOptions().expandWildcardExpressions(); + if (expandWildcards == false) { if (expressions == null || expressions.length == 0 || expressions.length == 1 && Metadata.ALL.equals(expressions[0])) { return List.of(); - } else { - return ExplicitResourceNameFilter.filterUnavailable( - context, - DateMathExpressionResolver.resolve(context, Arrays.asList(expressions)) - ); } } else { if (expressions == null || expressions.length == 0 || expressions.length == 1 && (Metadata.ALL.equals(expressions[0]) || Regex.isMatchAllPattern(expressions[0]))) { return WildcardExpressionResolver.resolveAll(context); + } else if (isNoneExpression(expressions)) { + return List.of(); + } + } + + // Using ArrayList when we know we do not have wildcards is an optimisation, given that one expression result in 0 or 1 resources. + Collection resources = expandWildcards && WildcardExpressionResolver.hasWildcards(expressions) + ? new LinkedHashSet<>() + : new ArrayList<>(expressions.length); + boolean wildcardSeen = false; + for (int i = 0, n = expressions.length; i < n; i++) { + String originalExpression = expressions[i]; + + // Resolve exclusion, a `-` prefixed expression is an exclusion only if it succeeds a wildcard. + boolean isExclusion = wildcardSeen && originalExpression.startsWith("-"); + String baseExpression = isExclusion ? originalExpression.substring(1) : originalExpression; + + // Resolve date math + baseExpression = DateMathExpressionResolver.resolveExpression(baseExpression, context::getStartTime); + + // Validate base expression + validateResourceExpression(context, baseExpression, expressions); + + // Check if it's wildcard + boolean isWildcard = expandWildcards && WildcardExpressionResolver.isWildcard(originalExpression); + wildcardSeen |= isWildcard; + + if (isWildcard) { + Set matchingResources = WildcardExpressionResolver.matchWildcardToResources(context, baseExpression); + + if (context.getOptions().allowNoIndices() == false && matchingResources.isEmpty()) { + throw notFoundException(baseExpression); + } + + if (isExclusion) { + resources.removeAll(matchingResources); + } else { + resources.addAll(matchingResources); + } } else { - return WildcardExpressionResolver.resolve( - context, - ExplicitResourceNameFilter.filterUnavailable( - context, - DateMathExpressionResolver.resolve(context, Arrays.asList(expressions)) - ) - ); + if (isExclusion) { + resources.remove(baseExpression); + } else if (ensureAliasOrIndexExists(context, baseExpression)) { + resources.add(baseExpression); + } + } + } + return resources; + } + + /** + * Validates the requested expression by performing the following checks: + * - Ensure it's not empty + * - Ensure it doesn't start with `_` + * - Ensure it's not a remote expression unless the allow unavailable targets is enabled. + */ + private static void validateResourceExpression(Context context, String current, String[] expressions) { + if (Strings.isEmpty(current)) { + throw notFoundException(current); + } + // Expressions can not start with an underscore. This is reserved for APIs. If the check gets here, the API + // does not exist and the path is interpreted as an expression. If the expression begins with an underscore, + // throw a specific error that is different from the [[IndexNotFoundException]], which is typically thrown + // if the expression can't be found. 
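[Editor's note, not part of the patch] The loop above encodes a subtle rule: a leading '-' marks an exclusion only after a wildcard has been seen; earlier in the list it is read as part of a literal name. Below is a self-contained sketch of just that rule, with hypothetical inputs; the real method also handles date math, validation, and system-index checks:

    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    final class ExclusionRuleSketch {
        // A '-' prefix acts as an exclusion only once a wildcard has appeared
        // earlier in the list; before that it is part of a literal name.
        static Set<String> resolve(List<String> expressions, Map<String, Set<String>> wildcardMatches) {
            Set<String> resources = new LinkedHashSet<>();
            boolean wildcardSeen = false;
            for (String expression : expressions) {
                boolean isExclusion = wildcardSeen && expression.startsWith("-");
                String base = isExclusion ? expression.substring(1) : expression;
                boolean isWildcard = base.contains("*");
                wildcardSeen |= isWildcard;
                if (isWildcard) {
                    resources.addAll(wildcardMatches.getOrDefault(base, Set.of()));
                } else if (isExclusion) {
                    resources.remove(base);
                } else {
                    resources.add(base);
                }
            }
            return resources;
        }
    }

For example, with wildcardMatches mapping "logs-*" to {"logs-2023", "logs-2024"}, resolve(List.of("logs-*", "-logs-2023"), wildcardMatches) returns {"logs-2024"}, while resolve(List.of("-logs-2023", "logs-*"), wildcardMatches) returns {"-logs-2023", "logs-2023", "logs-2024"}, because the exclusion precedes the wildcard and is therefore treated as a literal name.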
+ if (current.charAt(0) == '_') { + throw new InvalidIndexNameException(current, "must not start with '_'."); + } + ensureRemoteExpressionRequireIgnoreUnavailable(context.getOptions(), current, expressions); + } + + /** + * Throws an exception if the expression is a remote expression and we do not allow unavailable targets + */ + private static void ensureRemoteExpressionRequireIgnoreUnavailable(IndicesOptions options, String current, String[] expressions) { + if (options.ignoreUnavailable()) { + return; + } + if (RemoteClusterAware.isRemoteIndexName(current)) { + List crossClusterIndices = new ArrayList<>(); + for (int i = 0; i < expressions.length; i++) { + if (RemoteClusterAware.isRemoteIndexName(expressions[i])) { + crossClusterIndices.add(expressions[i]); + } } + throw new IllegalArgumentException( + "Cross-cluster calls are not supported in this context but remote indices were requested: " + crossClusterIndices + ); } } @@ -341,7 +428,7 @@ String[] concreteIndexNames(Context context, String... indexExpressions) { } Index[] concreteIndices(Context context, String... indexExpressions) { - final Collection expressions = resolveExpressions(context, indexExpressions); + final Collection expressions = resolveExpressionsToResources(context, indexExpressions); final Set concreteIndicesResult = Sets.newLinkedHashSetWithExpectedSize(expressions.size()); final Map indicesLookup = context.getState().metadata().getIndicesLookup(); @@ -395,7 +482,9 @@ Index[] concreteIndices(Context context, String... indexExpressions) { && context.getOptions().includeFailureIndices()) { // Collect the data streams involved Set aliasDataStreams = new HashSet<>(); - for (Index index : indexAbstraction.getIndices()) { + List indices = indexAbstraction.getIndices(); + for (int i = 0, n = indices.size(); i < n; i++) { + Index index = indices.get(i); aliasDataStreams.add(indicesLookup.get(index.getName()).getParentDataStream()); } for (DataStream dataStream : aliasDataStreams) { @@ -416,13 +505,16 @@ Index[] concreteIndices(Context context, String... 
indexExpressions) { if (context.getOptions().allowNoIndices() == false && concreteIndicesResult.isEmpty()) { throw notFoundException(indexExpressions); } - checkSystemIndexAccess(context, concreteIndicesResult); - return concreteIndicesResult.toArray(Index.EMPTY_ARRAY); + Index[] resultArray = concreteIndicesResult.toArray(Index.EMPTY_ARRAY); + SystemResourceAccess.checkSystemIndexAccess(context, threadContext, resultArray); + return resultArray; } private static void resolveIndicesForDataStream(Context context, DataStream dataStream, Set concreteIndicesResult) { if (shouldIncludeRegularIndices(context.getOptions())) { - for (Index index : dataStream.getIndices()) { + List indices = dataStream.getIndices(); + for (int i = 0, n = indices.size(); i < n; i++) { + Index index = indices.get(i); if (shouldTrackConcreteIndex(context, index)) { concreteIndicesResult.add(index); } @@ -431,7 +523,9 @@ private static void resolveIndicesForDataStream(Context context, DataStream data if (shouldIncludeFailureIndices(context.getOptions())) { // We short-circuit here, if failure indices are not allowed and they can be skipped if (context.getOptions().allowFailureIndices() || context.getOptions().ignoreUnavailable() == false) { - for (Index index : dataStream.getFailureIndices().getIndices()) { + List failureIndices = dataStream.getFailureIndices().getIndices(); + for (int i = 0, n = failureIndices.size(); i < n; i++) { + Index index = failureIndices.get(i); if (shouldTrackConcreteIndex(context, index)) { concreteIndicesResult.add(index); } @@ -482,64 +576,6 @@ private static boolean resolvesToMoreThanOneIndex(IndexAbstraction indexAbstract return indexAbstraction.getIndices().size() > 1; } - private void checkSystemIndexAccess(Context context, Set concreteIndices) { - final Predicate systemIndexAccessPredicate = context.getSystemIndexAccessPredicate(); - if (systemIndexAccessPredicate == Predicates.always()) { - return; - } - doCheckSystemIndexAccess(context, concreteIndices, systemIndexAccessPredicate); - } - - private void doCheckSystemIndexAccess(Context context, Set concreteIndices, Predicate systemIndexAccessPredicate) { - final Metadata metadata = context.getState().metadata(); - final List resolvedSystemIndices = new ArrayList<>(); - final List resolvedNetNewSystemIndices = new ArrayList<>(); - final Set resolvedSystemDataStreams = new HashSet<>(); - final SortedMap indicesLookup = metadata.getIndicesLookup(); - boolean matchedIndex = false; - for (Index concreteIndex : concreteIndices) { - IndexMetadata idxMetadata = metadata.index(concreteIndex); - String name = concreteIndex.getName(); - if (idxMetadata.isSystem() && systemIndexAccessPredicate.test(name) == false) { - matchedIndex = true; - IndexAbstraction indexAbstraction = indicesLookup.get(name); - if (indexAbstraction.getParentDataStream() != null) { - resolvedSystemDataStreams.add(indexAbstraction.getParentDataStream().getName()); - } else if (systemIndices.isNetNewSystemIndex(name)) { - resolvedNetNewSystemIndices.add(name); - } else { - resolvedSystemIndices.add(name); - } - } - } - if (matchedIndex) { - handleMatchedSystemIndices(resolvedSystemIndices, resolvedSystemDataStreams, resolvedNetNewSystemIndices); - } - } - - private void handleMatchedSystemIndices( - List resolvedSystemIndices, - Set resolvedSystemDataStreams, - List resolvedNetNewSystemIndices - ) { - if (resolvedSystemIndices.isEmpty() == false) { - Collections.sort(resolvedSystemIndices); - deprecationLogger.warn( - DeprecationCategory.API, - "open_system_index_access", - 
"this request accesses system indices: {}, but in a future major version, direct access to system " - + "indices will be prevented by default", - resolvedSystemIndices - ); - } - if (resolvedSystemDataStreams.isEmpty() == false) { - throw SystemIndices.dataStreamAccessException(threadContext, resolvedSystemDataStreams); - } - if (resolvedNetNewSystemIndices.isEmpty() == false) { - throw SystemIndices.netNewSystemIndexAccessException(threadContext, resolvedNetNewSystemIndices); - } - } - private static IndexNotFoundException notFoundException(String... indexExpressions) { final IndexNotFoundException infe; if (indexExpressions == null @@ -568,16 +604,16 @@ private static IndexNotFoundException notFoundException(String... indexExpressio } private static boolean shouldTrackConcreteIndex(Context context, Index index) { - if (context.systemIndexAccessLevel == SystemIndexAccessLevel.BACKWARDS_COMPATIBLE_ONLY - && context.netNewSystemIndexPredicate.test(index.getName())) { + if (SystemResourceAccess.isNetNewInBackwardCompatibleMode(context, index)) { // Exclude this one as it's a net-new system index, and we explicitly don't want those. return false; } + IndicesOptions options = context.getOptions(); if (DataStream.isFailureStoreFeatureFlagEnabled() && context.options.allowFailureIndices() == false) { DataStream parentDataStream = context.getState().metadata().getIndicesLookup().get(index.getName()).getParentDataStream(); if (parentDataStream != null && parentDataStream.isFailureStoreEnabled()) { if (parentDataStream.isFailureStoreIndex(index.getName())) { - if (context.options.ignoreUnavailable()) { + if (options.ignoreUnavailable()) { return false; } else { throw new FailureIndexNotSupportedException(index); @@ -587,7 +623,6 @@ private static boolean shouldTrackConcreteIndex(Context context, Index index) { } final IndexMetadata imd = context.state.metadata().index(index); if (imd.getState() == IndexMetadata.State.CLOSE) { - IndicesOptions options = context.options; if (options.forbidClosedIndices() && options.ignoreUnavailable() == false) { throw new IndexClosedException(index); } else { @@ -721,21 +756,6 @@ public boolean hasIndexAbstraction(String indexAbstraction, ClusterState state) return state.metadata().hasIndexAbstraction(resolvedAliasOrIndex); } - /** - * @return If the specified string is data math expression then this method returns the resolved expression. - */ - public static String resolveDateMathExpression(String dateExpression) { - return DateMathExpressionResolver.resolveExpression(dateExpression); - } - - /** - * @param time instant to consider when parsing the expression - * @return If the specified string is data math expression then this method returns the resolved expression. - */ - public static String resolveDateMathExpression(String dateExpression, long time) { - return DateMathExpressionResolver.resolveExpression(dateExpression, () -> time); - } - /** * Resolve an array of expressions to the set of indices and aliases that these expressions match. 
*/ @@ -765,7 +785,8 @@ public Set resolveExpressions( getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate() ); - Collection resolved = resolveExpressions(context, expressions); + // unmodifiable without creating a new collection as it might contain many items + Collection resolved = resolveExpressionsToResources(context, expressions); if (resolved instanceof Set) { // unmodifiable without creating a new collection as it might contain many items return Collections.unmodifiableSet((Set) resolved); @@ -779,7 +800,7 @@ public Set resolveExpressions( * given index. *
<p> Only aliases with filters are returned. If the indices list contains a non-filtering reference to * the index itself - null is returned. Returns {@code null} if no filtering is required. - * NOTE: The provided expressions must have been resolved already via {@link #resolveExpressions}. + * NOTE: The provided expressions must have been resolved already via {@link #resolveExpressionsToResources(Context, String...)}. */ public String[] filteringAliases(ClusterState state, String index, Set<String> resolvedExpressions) { return indexAliases(state, index, AliasMetadata::filteringRequired, DataStreamAlias::filteringRequired, false, resolvedExpressions); } @@ -799,7 +820,8 @@ boolean iterateIndexAliases(int indexAliasesSize, int resolvedExpressionsSize) { * Iterates through the list of indices and selects the effective list of required aliases for the given index. *
<p> Only aliases where the given predicate tests successfully are returned. If the indices list contains a non-required reference to * the index itself - null is returned. Returns {@code null} if no filtering is required. - * <p> NOTE: the provided expressions must have been resolved already via {@link #resolveExpressions}. + * <p>
NOTE: the provided expressions must have been resolved already via + * {@link #resolveExpressionsToResources(Context, String...)}. */ public String[] indexAliases( ClusterState state, @@ -878,7 +900,8 @@ public String[] indexAliases( .toArray(AliasMetadata[]::new); } List aliases = null; - for (AliasMetadata aliasMetadata : aliasCandidates) { + for (int i = 0; i < aliasCandidates.length; i++) { + AliasMetadata aliasMetadata = aliasCandidates[i]; if (requiredAlias.test(aliasMetadata)) { // If required - add it to the list of aliases if (aliases == null) { @@ -914,7 +937,7 @@ public Map> resolveSearchRouting(ClusterState state, @Nullab getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate() ); - final Collection resolvedExpressions = resolveExpressions(context, expressions); + final Collection resolvedExpressions = resolveExpressionsToResources(context, expressions); // TODO: it appears that this can never be true? if (isAllIndices(resolvedExpressions)) { @@ -932,7 +955,8 @@ public Map> resolveSearchRouting(ClusterState state, @Nullab for (String expression : resolvedExpressions) { IndexAbstraction indexAbstraction = state.metadata().getIndicesLookup().get(expression); if (indexAbstraction != null && indexAbstraction.getType() == Type.ALIAS) { - for (Index index : indexAbstraction.getIndices()) { + for (int i = 0, n = indexAbstraction.getIndices().size(); i < n; i++) { + Index index = indexAbstraction.getIndices().get(i); String concreteIndex = index.getName(); if (norouting.contains(concreteIndex) == false) { AliasMetadata aliasMetadata = state.metadata().index(concreteIndex).getAliases().get(indexAbstraction.getName()); @@ -961,7 +985,8 @@ public Map> resolveSearchRouting(ClusterState state, @Nullab continue; } if (dataStream.getIndices() != null) { - for (Index index : dataStream.getIndices()) { + for (int i = 0, n = dataStream.getIndices().size(); i < n; i++) { + Index index = dataStream.getIndices().get(i); String concreteIndex = index.getName(); routings = collectRoutings(routings, paramRouting, norouting, concreteIndex); } @@ -1006,8 +1031,8 @@ public static Map> resolveSearchRoutingAllIndices(Metadata m Set r = Sets.newHashSet(Strings.splitStringByCommaToArray(routing)); Map> routings = new HashMap<>(); String[] concreteIndices = metadata.getConcreteAllIndices(); - for (String index : concreteIndices) { - routings.put(index, r); + for (int i = 0; i < concreteIndices.length; i++) { + routings.put(concreteIndices[i], r); } return routings; } @@ -1036,6 +1061,16 @@ static boolean isExplicitAllPattern(Collection aliasesOrIndices) { return aliasesOrIndices != null && aliasesOrIndices.size() == 1 && Metadata.ALL.equals(aliasesOrIndices.iterator().next()); } + /** + * Identifies if this expression list is *,-* which effectively means a request that requests no indices. + */ + static boolean isNoneExpression(String[] expressions) { + return expressions.length == 2 && "*".equals(expressions[0]) && "-*".equals(expressions[1]); + } + + /** + * @return the system access level that will be applied in this resolution. See {@link SystemIndexAccessLevel} for details. 
+ */ public SystemIndexAccessLevel getSystemIndexAccessLevel() { final SystemIndexAccessLevel accessLevel = SystemIndices.getSystemIndexAccessLevel(threadContext); assert accessLevel != SystemIndexAccessLevel.BACKWARDS_COMPATIBLE_ONLY @@ -1043,6 +1078,14 @@ public SystemIndexAccessLevel getSystemIndexAccessLevel() { return accessLevel; } + /** + * Determines the right predicate based on the {@link IndexNameExpressionResolver#getSystemIndexAccessLevel()}. Specifically: + * - NONE implies no access to net-new system indices and data streams + * - BACKWARDS_COMPATIBLE_ONLY allows access also to net-new system resources + * - ALL allows access to everything + * - otherwise we fall back to {@link SystemIndices#getProductSystemIndexNamePredicate(ThreadContext)} + * @return the predicate that defines the access to system indices. + */ public Predicate<String> getSystemIndexAccessPredicate() { final SystemIndexAccessLevel systemIndexAccessLevel = getSystemIndexAccessLevel(); final Predicate<String> systemIndexAccessLevelPredicate; @@ -1067,6 +1110,43 @@ public Predicate<String> getNetNewSystemIndexPredicate() { return systemIndices::isNetNewSystemIndex; } + /** + * This returns `true` if the given {@param name} is of a resource that exists. + * Otherwise, it returns `false` if the `ignore_unavailable` option is `true`, or, if `false`, it throws a "not found" type of + * exception. + */ + @Nullable + private static boolean ensureAliasOrIndexExists(Context context, String name) { + boolean ignoreUnavailable = context.getOptions().ignoreUnavailable(); + IndexAbstraction indexAbstraction = context.getState().getMetadata().getIndicesLookup().get(name); + if (indexAbstraction == null) { + if (ignoreUnavailable) { + return false; + } else { + throw notFoundException(name); + } + } + // treat aliases as unavailable indices when ignoreAliases is set to true (e.g. delete index and update aliases api) + if (indexAbstraction.getType() == Type.ALIAS && context.getOptions().ignoreAliases()) { + if (ignoreUnavailable) { + return false; + } else { + throw aliasesNotSupportedException(name); + } + } + if (indexAbstraction.isDataStreamRelated() && context.includeDataStreams() == false) { + if (ignoreUnavailable) { + return false; + } else { + IndexNotFoundException infe = notFoundException(name); + // Allows callers to handle IndexNotFoundException differently based on whether data streams were excluded. + infe.addMetadata(EXCLUDED_DATA_STREAMS_KEY, "true"); + throw infe; + } + } + return true; + } + public static class Context { private final ClusterState state; @@ -1242,7 +1322,7 @@ public Predicate<String> getSystemIndexAccessPredicate() { } /** - * Resolves alias/index name expressions with wildcards into the corresponding concrete indices/aliases + * Resolves name expressions with wildcards into the corresponding concrete indices/aliases/data streams */ static final class WildcardExpressionResolver { @@ -1251,8 +1331,8 @@ private WildcardExpressionResolver() { } /** - * Returns all the indices, datastreams, and aliases, considering the open/closed, system, and hidden context parameters. - * Depending on the context, returns the names of the datastreams themselves or their backing indices. + * Returns all the indices, data streams, and aliases, considering the open/closed, system, and hidden context parameters. + * Depending on the context, returns the names of the data streams themselves or their backing indices.
*/ public static Collection resolveAll(Context context) { List concreteIndices = resolveEmptyOrTrivialWildcard(context); @@ -1261,16 +1341,17 @@ public static Collection resolveAll(Context context) { return concreteIndices; } - Stream ias = context.getState() + Set resolved = new HashSet<>(concreteIndices.size()); + context.getState() .metadata() .getIndicesLookup() .values() .stream() .filter(ia -> context.getOptions().expandWildcardsHidden() || ia.isHidden() == false) .filter(ia -> shouldIncludeIfDataStream(ia, context) || shouldIncludeIfAlias(ia, context)) - .filter(ia -> ia.isSystem() == false || context.systemIndexAccessPredicate.test(ia.getName())); + .filter(ia -> ia.isSystem() == false || context.systemIndexAccessPredicate.test(ia.getName())) + .forEach(ia -> resolved.addAll(expandToOpenClosed(context, ia))); - Set resolved = expandToOpenClosed(context, ias).collect(Collectors.toSet()); resolved.addAll(concreteIndices); return resolved; } @@ -1283,73 +1364,6 @@ private static boolean shouldIncludeIfAlias(IndexAbstraction ia, IndexNameExpres return context.getOptions().ignoreAliases() == false && ia.getType() == Type.ALIAS; } - /** - * Returns all the existing resource (index, alias and datastream) names that the {@param expressions} list resolves to. - * The passed-in {@param expressions} can contain wildcards and exclusions, as well as plain resource names. - *
- * The return is a {@code Collection} (usually a {@code Set} but can also be a {@code List}, for performance reasons) of plain - * resource names only. All the returned resources are "accessible", in the given context, i.e. the resources exist - * and are not an alias or a datastream if the context does not permit it. - * Wildcard expressions, depending on the context: - *
<ol>
- *   <li>might throw an exception if they don't resolve to anything</li>
- *   <li>might not resolve to hidden or system resources (but plain names can refer to hidden or system resources)</li>
- *   <li>might resolve to aliases and datastreams, and it could be (depending on the context) that their backing indices are what's
- * ultimately returned, instead of the alias or datastream name</li>
- * </ol>
- */ - public static Collection resolve(Context context, List expressions) { - // fast exit if there are no wildcards to evaluate - if (context.getOptions().expandWildcardExpressions() == false) { - return expressions; - } - int firstWildcardIndex = 0; - for (; firstWildcardIndex < expressions.size(); firstWildcardIndex++) { - String expression = expressions.get(firstWildcardIndex); - if (isWildcard(expression)) { - break; - } - } - if (firstWildcardIndex == expressions.size()) { - return expressions; - } - Set result = new HashSet<>(); - for (int i = 0; i < firstWildcardIndex; i++) { - result.add(expressions.get(i)); - } - AtomicBoolean emptyWildcardExpansion = context.getOptions().allowNoIndices() ? null : new AtomicBoolean(); - for (int i = firstWildcardIndex; i < expressions.size(); i++) { - String expression = expressions.get(i); - boolean isExclusion = i > firstWildcardIndex && expression.charAt(0) == '-'; - if (i == firstWildcardIndex || isWildcard(expression)) { - Stream matchingResources = matchResourcesToWildcard( - context, - isExclusion ? expression.substring(1) : expression - ); - Stream matchingOpenClosedNames = expandToOpenClosed(context, matchingResources); - if (emptyWildcardExpansion != null) { - emptyWildcardExpansion.set(true); - matchingOpenClosedNames = matchingOpenClosedNames.peek(x -> emptyWildcardExpansion.set(false)); - } - if (isExclusion) { - matchingOpenClosedNames.forEach(result::remove); - } else { - matchingOpenClosedNames.forEach(result::add); - } - if (emptyWildcardExpansion != null && emptyWildcardExpansion.get()) { - throw notFoundException(expression); - } - } else { - if (isExclusion) { - result.remove(expression.substring(1)); - } else { - result.add(expression); - } - } - } - return result; - } - private static IndexMetadata.State excludeState(IndicesOptions options) { final IndexMetadata.State excludeState; if (options.expandWildcardsOpen() && options.expandWildcardsClosed()) { @@ -1366,55 +1380,82 @@ private static IndexMetadata.State excludeState(IndicesOptions options) { } /** - * Given a single wildcard {@param expression}, return the {@code Stream} that contains all the resources (i.e. indices, aliases, - * and datastreams), that exist in the cluster at this moment in time, and that the wildcard "resolves" to (i.e. the resource's + * Given a single wildcard {@param expression}, return a {@code Set} that contains all the resources (i.e. indices, aliases, + * and data streams), that exist in the cluster at this moment in time, and that the wildcard "resolves" to (i.e. the resource's * name matches the {@param expression} wildcard). * The {@param context} provides the current time-snapshot view of cluster state, as well as conditions - * on whether to consider alias, datastream, system, and hidden resources. - * It does NOT consider the open or closed status of index resources. + * on whether to consider alias, data stream, system, and hidden resources. 
*/ - private static Stream matchResourcesToWildcard(Context context, String wildcardExpression) { + static Set matchWildcardToResources(Context context, String wildcardExpression) { assert isWildcard(wildcardExpression); final SortedMap indicesLookup = context.getState().getMetadata().getIndicesLookup(); - Stream matchesStream; + Set matchedResources = new HashSet<>(); + // this applies an initial pre-filtering in the case where the expression is a common suffix wildcard, eg "test*" if (Regex.isSuffixMatchPattern(wildcardExpression)) { - // this is an initial pre-filtering in the case where the expression is a common suffix wildcard, eg "test*" - matchesStream = filterIndicesLookupForSuffixWildcard(indicesLookup, wildcardExpression).values().stream(); - } else { - matchesStream = indicesLookup.values().stream(); - if (Regex.isMatchAllPattern(wildcardExpression) == false) { - matchesStream = matchesStream.filter( - indexAbstraction -> Regex.simpleMatch(wildcardExpression, indexAbstraction.getName()) - ); + for (IndexAbstraction ia : filterIndicesLookupForSuffixWildcard(indicesLookup, wildcardExpression).values()) { + maybeAddToResult(context, wildcardExpression, ia, matchedResources); + } + return matchedResources; + } + // In case of match all it fetches all index abstractions + if (Regex.isMatchAllPattern(wildcardExpression)) { + for (IndexAbstraction ia : indicesLookup.values()) { + maybeAddToResult(context, wildcardExpression, ia, matchedResources); } + return matchedResources; } - if (context.getOptions().ignoreAliases()) { - matchesStream = matchesStream.filter(indexAbstraction -> indexAbstraction.getType() != Type.ALIAS); + for (IndexAbstraction indexAbstraction : indicesLookup.values()) { + if (Regex.simpleMatch(wildcardExpression, indexAbstraction.getName())) { + maybeAddToResult(context, wildcardExpression, indexAbstraction, matchedResources); + } } - if (context.includeDataStreams() == false) { - matchesStream = matchesStream.filter(indexAbstraction -> indexAbstraction.isDataStreamRelated() == false); + return matchedResources; + } + + private static void maybeAddToResult( + Context context, + String wildcardExpression, + IndexAbstraction indexAbstraction, + Set matchedResources + ) { + if (shouldExpandToIndexAbstraction(context, wildcardExpression, indexAbstraction)) { + matchedResources.addAll(expandToOpenClosed(context, indexAbstraction)); } - // historic, i.e. not net-new, system indices are included irrespective of the system access predicate - // the system access predicate is based on the endpoint kind and HTTP request headers that identify the stack feature - matchesStream = matchesStream.filter( - indexAbstraction -> indexAbstraction.isSystem() == false - || (indexAbstraction.getType() != Type.DATA_STREAM - && indexAbstraction.getParentDataStream() == null - && context.netNewSystemIndexPredicate.test(indexAbstraction.getName()) == false) - || context.systemIndexAccessPredicate.test(indexAbstraction.getName()) - ); + } + + /** + * Checks if this index abstraction should be included because it matched the wildcard expression. 
+ * @param context the options of this request that influence the decision if this index abstraction should be included in the result + * @param wildcardExpression the wildcard expression that matched this index abstraction + * @param indexAbstraction the index abstraction in question + * @return true, if the index abstraction should be included in the result + */ + private static boolean shouldExpandToIndexAbstraction( + Context context, + String wildcardExpression, + IndexAbstraction indexAbstraction + ) { + if (context.getOptions().ignoreAliases() && indexAbstraction.getType() == Type.ALIAS) { + return false; + } + if (context.includeDataStreams() == false && indexAbstraction.isDataStreamRelated()) { + return false; + } + + if (indexAbstraction.isSystem() + && SystemResourceAccess.shouldExpandToSystemIndexAbstraction(context, indexAbstraction) == false) { + return false; + } + if (context.getOptions().expandWildcardsHidden() == false) { - if (wildcardExpression.startsWith(".")) { - // there is this behavior that hidden indices that start with "." are not hidden if the wildcard expression also - // starts with "." - matchesStream = matchesStream.filter( - indexAbstraction -> indexAbstraction.isHidden() == false || indexAbstraction.getName().startsWith(".") - ); - } else { - matchesStream = matchesStream.filter(indexAbstraction -> indexAbstraction.isHidden() == false); + // there is this behavior that hidden indices that start with "." are not hidden if the wildcard expression also + // starts with "." + if (indexAbstraction.isHidden() + && (wildcardExpression.startsWith(".") && indexAbstraction.getName().startsWith(".")) == false) { + return false; } } - return matchesStream; + return true; } private static Map filterIndicesLookupForSuffixWildcard( @@ -1430,35 +1471,39 @@ private static Map filterIndicesLookupForSuffixWildcar } /** - * Return the {@code Stream} of open and/or closed index names for the given {@param resources}. + * Return the {@code Set} of open and/or closed index names for the given {@param resources}. * Data streams and aliases are interpreted to refer to multiple indices, * then all index resources are filtered by their open/closed status. 
*/ - private static Stream expandToOpenClosed(Context context, Stream resources) { + private static Set expandToOpenClosed(Context context, IndexAbstraction indexAbstraction) { final IndexMetadata.State excludeState = excludeState(context.getOptions()); - return resources.flatMap(indexAbstraction -> { - if (context.isPreserveAliases() && indexAbstraction.getType() == Type.ALIAS) { - return Stream.of(indexAbstraction.getName()); - } else if (context.isPreserveDataStreams() && indexAbstraction.getType() == Type.DATA_STREAM) { - return Stream.of(indexAbstraction.getName()); - } else { - Stream indicesStateStream = Stream.of(); - if (shouldIncludeRegularIndices(context.getOptions())) { - indicesStateStream = indexAbstraction.getIndices().stream().map(context.state.metadata()::index); - } - if (indexAbstraction.getType() == Type.DATA_STREAM && shouldIncludeFailureIndices(context.getOptions())) { - DataStream dataStream = (DataStream) indexAbstraction; - indicesStateStream = Stream.concat( - indicesStateStream, - dataStream.getFailureIndices().getIndices().stream().map(context.state.metadata()::index) - ); + Set resources = new HashSet<>(); + if (context.isPreserveAliases() && indexAbstraction.getType() == Type.ALIAS) { + resources.add(indexAbstraction.getName()); + } else if (context.isPreserveDataStreams() && indexAbstraction.getType() == Type.DATA_STREAM) { + resources.add(indexAbstraction.getName()); + } else { + if (shouldIncludeRegularIndices(context.getOptions())) { + for (int i = 0, n = indexAbstraction.getIndices().size(); i < n; i++) { + Index index = indexAbstraction.getIndices().get(i); + IndexMetadata indexMetadata = context.state.metadata().index(index); + if (indexMetadata.getState() != excludeState) { + resources.add(index.getName()); + } } - if (excludeState != null) { - indicesStateStream = indicesStateStream.filter(indexMeta -> indexMeta.getState() != excludeState); + } + if (indexAbstraction.getType() == Type.DATA_STREAM && shouldIncludeFailureIndices(context.getOptions())) { + DataStream dataStream = (DataStream) indexAbstraction; + for (int i = 0, n = dataStream.getFailureIndices().getIndices().size(); i < n; i++) { + Index index = dataStream.getFailureIndices().getIndices().get(i); + IndexMetadata indexMetadata = context.state.metadata().index(index); + if (indexMetadata.getState() != excludeState) { + resources.add(index.getName()); + } } - return indicesStateStream.map(indexMeta -> indexMeta.getIndex().getName()); } - }); + } + return resources; } private static List resolveEmptyOrTrivialWildcard(Context context) { @@ -1471,26 +1516,26 @@ private static List resolveEmptyOrTrivialWildcard(Context context) { } private static List resolveEmptyOrTrivialWildcardWithAllowedSystemIndices(Context context, String[] allIndices) { - return Arrays.stream(allIndices).filter(name -> { - if (name.startsWith(".")) { - IndexAbstraction abstraction = context.state.metadata().getIndicesLookup().get(name); - assert abstraction != null : "null abstraction for " + name + " but was in array of all indices"; - if (abstraction.isSystem()) { - if (context.netNewSystemIndexPredicate.test(name)) { - if (SystemIndexAccessLevel.BACKWARDS_COMPATIBLE_ONLY.equals(context.systemIndexAccessLevel)) { - return false; - } else { - return context.systemIndexAccessPredicate.test(name); - } - } else if (abstraction.getType() == Type.DATA_STREAM || abstraction.getParentDataStream() != null) { - return context.systemIndexAccessPredicate.test(name); - } - } else { - return true; - } + List filteredIndices = new 
ArrayList<>(allIndices.length); + for (int i = 0; i < allIndices.length; i++) { + if (shouldIncludeIndexAbstraction(context, allIndices[i])) { + filteredIndices.add(allIndices[i]); } + } + return filteredIndices; + } + + private static boolean shouldIncludeIndexAbstraction(Context context, String name) { + if (name.startsWith(".") == false) { return true; - }).toList(); + } + + IndexAbstraction abstraction = context.state.metadata().getIndicesLookup().get(name); assert abstraction != null : "null abstraction for " + name + " but was in array of all indices"; + if (abstraction.isSystem() == false) { + return true; + } + return SystemResourceAccess.isSystemIndexAbstractionAccessible(context, abstraction); } private static String[] resolveEmptyOrTrivialWildcardToAllIndices(IndicesOptions options, Metadata metadata) { @@ -1513,8 +1558,39 @@ private static String[] resolveEmptyOrTrivialWildcardToAllIndices(IndicesOptions return Strings.EMPTY_ARRAY; } } + + static boolean isWildcard(String expression) { + return Regex.isSimpleMatchPattern(expression); + } + + static boolean hasWildcards(String[] expressions) { + for (int i = 0; i < expressions.length; i++) { + if (isWildcard(expressions[i])) { + return true; + } + } + return false; + } + } + + /** + * @return If the specified string is a date math expression then this method returns the resolved expression. + */ + public static String resolveDateMathExpression(String dateExpression) { + return DateMathExpressionResolver.resolveExpression(dateExpression); + } + + /** + * @param time instant to consider when parsing the expression + * @return If the specified string is a date math expression then this method returns the resolved expression. + */ + public static String resolveDateMathExpression(String dateExpression, long time) { + return DateMathExpressionResolver.resolveExpression(dateExpression, () -> time); } + /** + * Resolves a date math expression based on the requested time. + */ public static final class DateMathExpressionResolver { private static final DateFormatter DEFAULT_DATE_FORMATTER = DateFormatter.forPattern("uuuu.MM.dd"); @@ -1530,35 +1606,18 @@ private DateMathExpressionResolver() { } /** - * Resolves date math expressions. If this is a noop the given {@code expressions} list is returned without copying. - * As a result callers of this method should not mutate the returned list. Mutating it may come with unexpected side effects. + * Resolves a date math expression using the current time. This method recognises a date math expression iff it starts with + * %3C and ends with %3E. Otherwise, it returns the expression intact. */ - public static List<String> resolve(Context context, List<String> expressions) { boolean wildcardSeen = false; final boolean expandWildcards = context.getOptions().expandWildcardExpressions(); String[] result = null; for (int i = 0, n = expressions.size(); i < n; i++) { String expression = expressions.get(i); // accepts date-math exclusions that are of the form "-<...{}>", i.e. the "-" is outside the "<>" date-math template boolean isExclusion = wildcardSeen && expression.startsWith("-"); wildcardSeen = wildcardSeen || (expandWildcards && isWildcard(expression)); String toResolve = isExclusion ? expression.substring(1) : expression; String resolved = resolveExpression(toResolve, context::getStartTime); if (toResolve != resolved) { if (result == null) { result = expressions.toArray(Strings.EMPTY_ARRAY); } result[i] = isExclusion ? "-" + resolved : resolved; } } return result == null ? 
expressions : Arrays.asList(result); - } - - static String resolveExpression(String expression) { + public static String resolveExpression(String expression) { return resolveExpression(expression, System::currentTimeMillis); } - static String resolveExpression(String expression, LongSupplier getTime) { + /** + * Resolves a date math expression using the provided time. This method recognises a date math expression iff it starts with + * %3C and ends with %3E. Otherwise, it returns the expression intact. + */ + public static String resolveExpression(String expression, LongSupplier getTime) { if (expression.startsWith(EXPRESSION_LEFT_BOUND) == false || expression.endsWith(EXPRESSION_RIGHT_BOUND) == false) { return expression; } @@ -1707,135 +1766,133 @@ private static String doResolveExpression(String expression, LongSupplier getTim } } - public static final class ExplicitResourceNameFilter { + /** + * In this class we collect the system access relevant code. The helper methods provide the following functionalities: + * - determining the access to a system index abstraction + * - verifying the access to system abstractions and adding the necessary warnings + * - determining the access to a system index based on its name + * WARNING: we have observed differences in how the access is determined. For now this behaviour is documented and preserved. + */ + public static final class SystemResourceAccess { - private ExplicitResourceNameFilter() { + private SystemResourceAccess() { // Utility class } /** - * Returns an expression list with "unavailable" (missing or not acceptable) resource names filtered out. - * Only explicit resource names are considered for filtering. Wildcard and exclusion expressions are kept in. + * Checks if this system index abstraction should be included when resolving via {@link + * IndexNameExpressionResolver.WildcardExpressionResolver#resolveEmptyOrTrivialWildcardWithAllowedSystemIndices(Context, String[])}. + * NOTE: it behaves differently than {@link SystemResourceAccess#shouldExpandToSystemIndexAbstraction(Context, IndexAbstraction)} + * because in the case that the access level is BACKWARDS_COMPATIBLE_ONLY it does not include the net-new indices; this is + * questionable. */ - public static List<String> filterUnavailable(Context context, List<String> expressions) { - ensureRemoteIndicesRequireIgnoreUnavailable(context.getOptions(), expressions); - final boolean expandWildcards = context.getOptions().expandWildcardExpressions(); - boolean wildcardSeen = false; - List<String> result = null; - for (int i = 0; i < expressions.size(); i++) { - String expression = expressions.get(i); - if (Strings.isEmpty(expression)) { - throw notFoundException(expression); - } - // Expressions can not start with an underscore. This is reserved for APIs. If the check gets here, the API - // does not exist and the path is interpreted as an expression. If the expression begins with an underscore, - // throw a specific error that is different from the [[IndexNotFoundException]], which is typically thrown - // if the expression can't be found. 
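For instance (an illustrative request, not one taken from this change): GET /_frobnicate reaches the resolver only because no such API exists, so the path is parsed as the index expression "_frobnicate", and the check below fails it with InvalidIndexNameException ("must not start with '_'.") instead of the generic IndexNotFoundException.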
- if (expression.charAt(0) == '_') { - throw new InvalidIndexNameException(expression, "must not start with '_'."); - } - final boolean isWildcard = expandWildcards && isWildcard(expression); - if (isWildcard || (wildcardSeen && expression.charAt(0) == '-') || ensureAliasOrIndexExists(context, expression)) { - if (result != null) { - result.add(expression); - } + public static boolean isSystemIndexAbstractionAccessible(Context context, IndexAbstraction abstraction) { + assert abstraction.isSystem() : "We should only check this for system resources"; + if (context.netNewSystemIndexPredicate.test(abstraction.getName())) { + if (SystemIndexAccessLevel.BACKWARDS_COMPATIBLE_ONLY.equals(context.systemIndexAccessLevel)) { + return false; } else { - if (result == null) { - result = new ArrayList<>(expressions.size() - 1); - result.addAll(expressions.subList(0, i)); - } + return context.systemIndexAccessPredicate.test(abstraction.getName()); } - wildcardSeen |= isWildcard; + } else if (abstraction.getType() == Type.DATA_STREAM || abstraction.getParentDataStream() != null) { + return context.systemIndexAccessPredicate.test(abstraction.getName()); } - return result == null ? expressions : result; + return true; } /** - * This returns `true` if the given {@param name} is of a resource that exists. - * Otherwise, it returns `false` if the `ignore_unvailable` option is `true`, or, if `false`, it throws a "not found" type of - * exception. + * Historic, i.e. not net-new, system indices are included irrespective of the system access predicate + * the system access predicate is based on the endpoint kind and HTTP request headers that identify the stack feature. + * A historic system resource, can only be an index since system data streams were added later. */ - @Nullable - private static boolean ensureAliasOrIndexExists(Context context, String name) { - boolean ignoreUnavailable = context.getOptions().ignoreUnavailable(); - IndexAbstraction indexAbstraction = context.getState().getMetadata().getIndicesLookup().get(name); - if (indexAbstraction == null) { - if (ignoreUnavailable) { - return false; - } else { - throw notFoundException(name); - } - } - // treat aliases as unavailable indices when ignoreAliases is set to true (e.g. delete index and update aliases api) - if (indexAbstraction.getType() == Type.ALIAS && context.getOptions().ignoreAliases()) { - if (ignoreUnavailable) { - return false; - } else { - throw aliasesNotSupportedException(name); - } - } - if (indexAbstraction.isDataStreamRelated() && context.includeDataStreams() == false) { - if (ignoreUnavailable) { - return false; - } else { - IndexNotFoundException infe = notFoundException(name); - // Allows callers to handle IndexNotFoundException differently based on whether data streams were excluded. 
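(A caller can, for example, catch the IndexNotFoundException and inspect getMetadata(EXCLUDED_DATA_STREAMS_KEY) on it to distinguish "nothing matched" from "only data streams matched but the request did not allow data streams".)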
- infe.addMetadata(EXCLUDED_DATA_STREAMS_KEY, "true"); - throw infe; - } - } - return true; + private static boolean shouldExpandToSystemIndexAbstraction(Context context, IndexAbstraction indexAbstraction) { + assert indexAbstraction.isSystem() : "We should only check this for system resources"; + boolean isHistoric = indexAbstraction.getType() != Type.DATA_STREAM + && indexAbstraction.getParentDataStream() == null + && context.netNewSystemIndexPredicate.test(indexAbstraction.getName()) == false; + return isHistoric || context.systemIndexAccessPredicate.test(indexAbstraction.getName()); } - private static void ensureRemoteIndicesRequireIgnoreUnavailable(IndicesOptions options, List indexExpressions) { - if (options.ignoreUnavailable()) { + /** + * Checks if any system indices that should not have been accessible according to the + * {@link Context#getSystemIndexAccessPredicate()} are accessed, and it performs the following actions: + * - if there are historic (aka not net-new) system indices, then it adds a deprecation warning + * - if it contains net-new system indices or system data streams, it throws an exception. + */ + private static void checkSystemIndexAccess(Context context, ThreadContext threadContext, Index... concreteIndices) { + final Predicate systemIndexAccessPredicate = context.getSystemIndexAccessPredicate(); + if (systemIndexAccessPredicate == Predicates.always()) { return; } - for (String index : indexExpressions) { - if (RemoteClusterAware.isRemoteIndexName(index)) { - failOnRemoteIndicesNotIgnoringUnavailable(indexExpressions); - } - } + doCheckSystemIndexAccess(context, systemIndexAccessPredicate, threadContext, concreteIndices); } - private static void failOnRemoteIndicesNotIgnoringUnavailable(List indexExpressions) { - List crossClusterIndices = new ArrayList<>(); - for (String index : indexExpressions) { - if (RemoteClusterAware.isRemoteIndexName(index)) { - crossClusterIndices.add(index); + private static void doCheckSystemIndexAccess( + Context context, + Predicate systemIndexAccessPredicate, + ThreadContext threadContext, + Index... concreteIndices + ) { + final Metadata metadata = context.getState().metadata(); + final List resolvedSystemIndices = new ArrayList<>(); + final List resolvedNetNewSystemIndices = new ArrayList<>(); + final Set resolvedSystemDataStreams = new HashSet<>(); + final SortedMap indicesLookup = metadata.getIndicesLookup(); + boolean matchedIndex = false; + for (int i = 0; i < concreteIndices.length; i++) { + Index concreteIndex = concreteIndices[i]; + IndexMetadata idxMetadata = metadata.index(concreteIndex); + String name = concreteIndex.getName(); + if (idxMetadata.isSystem() && systemIndexAccessPredicate.test(name) == false) { + matchedIndex = true; + IndexAbstraction indexAbstraction = indicesLookup.get(name); + if (indexAbstraction.getParentDataStream() != null) { + resolvedSystemDataStreams.add(indexAbstraction.getParentDataStream().getName()); + } else if (context.netNewSystemIndexPredicate.test(name)) { + resolvedNetNewSystemIndices.add(name); + } else { + resolvedSystemIndices.add(name); + } } } - throw new IllegalArgumentException( - "Cross-cluster calls are not supported in this context but remote indices were requested: " + crossClusterIndices - ); - } - } - - /** - * This is a context for the DateMathExpressionResolver which does not require {@code IndicesOptions} or {@code ClusterState} - * since it uses only the start time to resolve expressions. 
- */ - public static final class ResolverContext extends Context { - public ResolverContext() { - this(System.currentTimeMillis()); - } - - public ResolverContext(long startTime) { - super(null, null, startTime, false, false, false, false, SystemIndexAccessLevel.ALL, Predicates.never(), Predicates.never()); + if (matchedIndex) { + handleMatchedSystemIndices(resolvedSystemIndices, resolvedSystemDataStreams, resolvedNetNewSystemIndices, threadContext); + } } - @Override - public ClusterState getState() { - throw new UnsupportedOperationException("should never be called"); + private static void handleMatchedSystemIndices( + List resolvedSystemIndices, + Set resolvedSystemDataStreams, + List resolvedNetNewSystemIndices, + ThreadContext threadContext + ) { + if (resolvedSystemIndices.isEmpty() == false) { + Collections.sort(resolvedSystemIndices); + deprecationLogger.warn( + DeprecationCategory.API, + "open_system_index_access", + "this request accesses system indices: {}, but in a future major version, direct access to system " + + "indices will be prevented by default", + resolvedSystemIndices + ); + } + if (resolvedSystemDataStreams.isEmpty() == false) { + throw SystemIndices.dataStreamAccessException(threadContext, resolvedSystemDataStreams); + } + if (resolvedNetNewSystemIndices.isEmpty() == false) { + throw SystemIndices.netNewSystemIndexAccessException(threadContext, resolvedNetNewSystemIndices); + } } - @Override - public IndicesOptions getOptions() { - throw new UnsupportedOperationException("should never be called"); + /** + * Used in {@link IndexNameExpressionResolver#shouldTrackConcreteIndex(Context, Index)} to exclude net-new indices + * when we are in backwards compatible only access level. + * This also feels questionable as well. + */ + private static boolean isNetNewInBackwardCompatibleMode(Context context, Index index) { + return context.systemIndexAccessLevel == SystemIndexAccessLevel.BACKWARDS_COMPATIBLE_ONLY + && context.netNewSystemIndexPredicate.test(index.getName()); } } - private static boolean isWildcard(String expression) { - return Regex.isSimpleMatchPattern(expression); - } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 60dee79d403d6..a4fa139043e50 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -13,7 +13,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.TransportVersion; -import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.Diffable; @@ -24,7 +23,6 @@ import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.coordination.CoordinationMetadata; -import org.elasticsearch.cluster.coordination.PublicationTransportHandler; import org.elasticsearch.cluster.metadata.IndexAbstraction.ConcreteIndex; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.common.Strings; @@ -1495,10 +1493,7 @@ public Diff diff(Metadata previousState) { } public static Diff readDiffFrom(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(MetadataDiff.NOOP_METADATA_DIFF_VERSION) && in.readBoolean()) { - return SimpleDiffable.empty(); - } - return new 
MetadataDiff(in); + return in.readBoolean() ? SimpleDiffable.empty() : new MetadataDiff(in); } public static Metadata fromXContent(XContentParser parser) throws IOException { @@ -1552,10 +1547,6 @@ public Map getMappingsByHash() { private static class MetadataDiff implements Diff { - private static final TransportVersion NOOP_METADATA_DIFF_VERSION = TransportVersions.V_8_5_0; - private static final TransportVersion NOOP_METADATA_DIFF_SAFE_VERSION = - PublicationTransportHandler.INCLUDES_LAST_COMMITTED_DATA_VERSION; - private final long version; private final String clusterUUID; private final boolean clusterUUIDCommitted; @@ -1620,36 +1611,19 @@ private MetadataDiff(StreamInput in) throws IOException { coordinationMetadata = new CoordinationMetadata(in); transientSettings = Settings.readSettingsFromStream(in); persistentSettings = Settings.readSettingsFromStream(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { - hashesOfConsistentSettings = DiffableStringMap.readDiffFrom(in); - } else { - hashesOfConsistentSettings = DiffableStringMap.DiffableStringMapDiff.EMPTY; - } + hashesOfConsistentSettings = DiffableStringMap.readDiffFrom(in); indices = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), INDEX_METADATA_DIFF_VALUE_READER); templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), TEMPLATES_DIFF_VALUE_READER); customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { - reservedStateMetadata = DiffableUtils.readJdkMapDiff( - in, - DiffableUtils.getStringKeySerializer(), - RESERVED_DIFF_VALUE_READER - ); - } else { - reservedStateMetadata = DiffableUtils.emptyDiff(); - } + reservedStateMetadata = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), RESERVED_DIFF_VALUE_READER); } @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(NOOP_METADATA_DIFF_SAFE_VERSION)) { - out.writeBoolean(empty); - if (empty) { - // noop diff - return; - } - } else if (out.getTransportVersion().onOrAfter(NOOP_METADATA_DIFF_VERSION)) { - // noops are not safe with these versions, see #92259 - out.writeBoolean(false); + out.writeBoolean(empty); + if (empty) { + // noop diff + return; } out.writeString(clusterUUID); out.writeBoolean(clusterUUIDCommitted); @@ -1657,15 +1631,11 @@ public void writeTo(StreamOutput out) throws IOException { coordinationMetadata.writeTo(out); transientSettings.writeTo(out); persistentSettings.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { - hashesOfConsistentSettings.writeTo(out); - } + hashesOfConsistentSettings.writeTo(out); indices.writeTo(out); templates.writeTo(out); customs.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { - reservedStateMetadata.writeTo(out); - } + reservedStateMetadata.writeTo(out); } @Override @@ -1696,8 +1666,6 @@ public Metadata apply(Metadata part) { } } - public static final TransportVersion MAPPINGS_AS_HASH_VERSION = TransportVersions.V_8_1_0; - public static Metadata readFrom(StreamInput in) throws IOException { Builder builder = new Builder(); builder.version = in.readLong(); @@ -1706,17 +1674,11 @@ public static Metadata readFrom(StreamInput in) throws IOException { builder.coordinationMetadata(new CoordinationMetadata(in)); 
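(The deletions across these Metadata serialization hunks all follow one pattern: reads and writes that were gated on in.getTransportVersion().onOrAfter(...) checks for compatibility with older nodes become unconditional, since the oldest transport version this branch still supports already satisfies every removed check; the noop-diff boolean in readDiffFrom/writeTo can likewise be read and written unconditionally.)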
builder.transientSettings(readSettingsFromStream(in)); builder.persistentSettings(readSettingsFromStream(in)); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { - builder.hashesOfConsistentSettings(DiffableStringMap.readFrom(in)); - } + builder.hashesOfConsistentSettings(DiffableStringMap.readFrom(in)); final Function mappingLookup; - if (in.getTransportVersion().onOrAfter(MAPPINGS_AS_HASH_VERSION)) { - final Map mappingMetadataMap = in.readMapValues(MappingMetadata::new, MappingMetadata::getSha256); - if (mappingMetadataMap.size() > 0) { - mappingLookup = mappingMetadataMap::get; - } else { - mappingLookup = null; - } + final Map mappingMetadataMap = in.readMapValues(MappingMetadata::new, MappingMetadata::getSha256); + if (mappingMetadataMap.isEmpty() == false) { + mappingLookup = mappingMetadataMap::get; } else { mappingLookup = null; } @@ -1733,11 +1695,9 @@ public static Metadata readFrom(StreamInput in) throws IOException { Custom customIndexMetadata = in.readNamedWriteable(Custom.class); builder.putCustom(customIndexMetadata.getWriteableName(), customIndexMetadata); } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { - int reservedStateSize = in.readVInt(); - for (int i = 0; i < reservedStateSize; i++) { - builder.put(ReservedStateMetadata.readFrom(in)); - } + int reservedStateSize = in.readVInt(); + for (int i = 0; i < reservedStateSize; i++) { + builder.put(ReservedStateMetadata.readFrom(in)); } return builder.build(); } @@ -1750,24 +1710,15 @@ public void writeTo(StreamOutput out) throws IOException { coordinationMetadata.writeTo(out); transientSettings.writeTo(out); persistentSettings.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { - hashesOfConsistentSettings.writeTo(out); - } - // Starting in #MAPPINGS_AS_HASH_VERSION we write the mapping metadata first and then write the indices without metadata so that - // we avoid writing duplicate mappings twice - if (out.getTransportVersion().onOrAfter(MAPPINGS_AS_HASH_VERSION)) { - out.writeMapValues(mappingsByHash); - } + hashesOfConsistentSettings.writeTo(out); + out.writeMapValues(mappingsByHash); out.writeVInt(indices.size()); - final boolean writeMappingsHash = out.getTransportVersion().onOrAfter(MAPPINGS_AS_HASH_VERSION); for (IndexMetadata indexMetadata : this) { - indexMetadata.writeTo(out, writeMappingsHash); + indexMetadata.writeTo(out, true); } out.writeCollection(templates.values()); VersionedNamedWriteable.writeVersionedWritables(out, customs); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { - out.writeCollection(reservedStateMetadata.values()); - } + out.writeCollection(reservedStateMetadata.values()); } public static Builder builder() { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index 1f014a526b9a6..52e4d75ac5116 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexRoutingTable; import 
org.elasticsearch.cluster.routing.RoutingTable; @@ -127,6 +128,16 @@ public class MetadataCreateIndexService { public static final int MAX_INDEX_NAME_BYTES = 255; + /** + * Name of the setting used to allow blocking refreshes on newly created indices. + */ + public static final String USE_INDEX_REFRESH_BLOCK_SETTING_NAME = "stateless.indices.use_refresh_block_upon_index_creation"; + + @FunctionalInterface + interface ClusterBlocksTransformer { + void apply(ClusterBlocks.Builder clusterBlocks, IndexMetadata indexMetadata, TransportVersion minClusterTransportVersion); + } + private final Settings settings; private final ClusterService clusterService; private final IndicesService indicesService; @@ -139,6 +150,7 @@ public class MetadataCreateIndexService { private final boolean forbidPrivateIndexSettings; private final Set indexSettingProviders; private final ThreadPool threadPool; + private final ClusterBlocksTransformer blocksTransformerUponIndexCreation; public MetadataCreateIndexService( final Settings settings, @@ -166,6 +178,7 @@ public MetadataCreateIndexService( this.shardLimitValidator = shardLimitValidator; this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); this.threadPool = threadPool; + this.blocksTransformerUponIndexCreation = createClusterBlocksTransformerForIndexCreation(settings); } /** @@ -540,8 +553,10 @@ private ClusterState applyCreateIndexWithTemporaryService( currentState, indexMetadata, metadataTransformer, + blocksTransformerUponIndexCreation, allocationService.getShardRoutingRoleStrategy() ); + assert assertHasRefreshBlock(indexMetadata, updated, updated.getMinTransportVersion()); if (request.performReroute()) { updated = allocationService.reroute(updated, "index [" + indexMetadata.getIndex().getName() + "] created", rerouteListener); } @@ -1294,6 +1309,7 @@ static ClusterState clusterStateCreateIndex( ClusterState currentState, IndexMetadata indexMetadata, BiConsumer metadataTransformer, + ClusterBlocksTransformer blocksTransformer, ShardRoutingRoleStrategy shardRoutingRoleStrategy ) { final Metadata newMetadata; @@ -1307,6 +1323,9 @@ static ClusterState clusterStateCreateIndex( var blocksBuilder = ClusterBlocks.builder().blocks(currentState.blocks()); blocksBuilder.updateBlocks(indexMetadata); + if (blocksTransformer != null) { + blocksTransformer.apply(blocksBuilder, indexMetadata, currentState.getMinTransportVersion()); + } var routingTableBuilder = RoutingTable.builder(shardRoutingRoleStrategy, currentState.routingTable()) .addAsNew(newMetadata.index(indexMetadata.getIndex().getName())); @@ -1745,4 +1764,39 @@ public static void validateStoreTypeSetting(Settings indexSettings) { ); } } + + private static boolean useRefreshBlock(Settings settings) { + return DiscoveryNode.isStateless(settings) && settings.getAsBoolean(USE_INDEX_REFRESH_BLOCK_SETTING_NAME, false); + } + + static ClusterBlocksTransformer createClusterBlocksTransformerForIndexCreation(Settings settings) { + if (useRefreshBlock(settings) == false) { + return (clusterBlocks, indexMetadata, minClusterTransportVersion) -> {}; + } + logger.debug("applying refresh block on index creation"); + return (clusterBlocks, indexMetadata, minClusterTransportVersion) -> { + if (applyRefreshBlock(indexMetadata, minClusterTransportVersion)) { + // Applies the INDEX_REFRESH_BLOCK to the index. This block will remain in cluster state until an unpromotable shard is + // started or a configurable delay is elapsed. 
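(Two gates control this: createClusterBlocksTransformerForIndexCreation returns a no-op transformer unless the node is stateless and stateless.indices.use_refresh_block_upon_index_creation is set, and applyRefreshBlock then only fires for a genuinely new index, i.e. one with at least one replica, no resize source, empty in-sync allocation ids, and a minimum cluster transport version on or after TransportVersions.NEW_REFRESH_CLUSTER_BLOCK.)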
+ clusterBlocks.addIndexBlock(indexMetadata.getIndex().getName(), IndexMetadata.INDEX_REFRESH_BLOCK); + } + }; + } + + private static boolean applyRefreshBlock(IndexMetadata indexMetadata, TransportVersion minClusterTransportVersion) { + return 0 < indexMetadata.getNumberOfReplicas() // index has replicas + && indexMetadata.getResizeSourceIndex() == null // index is not a split/shrink index + && indexMetadata.getInSyncAllocationIds().values().stream().allMatch(Set::isEmpty) // index is a new index + && minClusterTransportVersion.onOrAfter(TransportVersions.NEW_REFRESH_CLUSTER_BLOCK); + } + + private boolean assertHasRefreshBlock(IndexMetadata indexMetadata, ClusterState clusterState, TransportVersion minTransportVersion) { + var hasRefreshBlock = clusterState.blocks().hasIndexBlock(indexMetadata.getIndex().getName(), IndexMetadata.INDEX_REFRESH_BLOCK); + if (useRefreshBlock(settings) == false || applyRefreshBlock(indexMetadata, minTransportVersion) == false) { + assert hasRefreshBlock == false : indexMetadata.getIndex(); + } else { + assert hasRefreshBlock : indexMetadata.getIndex(); + } + return true; + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java index 89f776a7ada0f..49bd38330e3af 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java @@ -9,24 +9,12 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.Version; import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; -import java.util.Map; import java.util.Set; public class MetadataFeatures implements FeatureSpecification { - @Override - public Map getHistoricalFeatures() { - return Map.of( - DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORTED, - Version.V_8_3_0, - DesiredNode.DOUBLE_PROCESSORS_SUPPORTED, - Version.V_8_5_0 - ); - } - @Override public Set getFeatures() { return Set.of(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index d6ed28454df96..3878a3329b634 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -782,7 +782,7 @@ private void validateUseOfDeprecatedIngestPipelines(String name, IngestMetadata private void emitWarningIfPipelineIsDeprecated(String name, Map pipelines, String pipelineName) { Optional.ofNullable(pipelineName) .map(pipelines::get) - .filter(p -> Boolean.TRUE.equals(p.getConfigAsMap().get("deprecated"))) + .filter(p -> Boolean.TRUE.equals(p.getConfig().get("deprecated"))) .ifPresent( p -> deprecationLogger.warn( DeprecationCategory.TEMPLATES, diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java index 9b3abf38c519b..0b9c359006b23 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java @@ -46,7 +46,11 @@ public class RepositoryMetadata implements Writeable { * @param settings repository settings */ public RepositoryMetadata(String 
name, String type, Settings settings) { - this(name, RepositoryData.MISSING_UUID, type, settings, RepositoryData.UNKNOWN_REPO_GEN, RepositoryData.EMPTY_REPO_GEN); + this(name, RepositoryData.MISSING_UUID, type, settings); + } + + public RepositoryMetadata(String name, String uuid, String type, Settings settings) { + this(name, uuid, type, settings, RepositoryData.UNKNOWN_REPO_GEN, RepositoryData.EMPTY_REPO_GEN); } public RepositoryMetadata(RepositoryMetadata metadata, long generation, long pendingGeneration) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java index 1c89d3bf259b5..be0e3429a2ce4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java @@ -167,7 +167,7 @@ public void process(IndexRequest indexRequest) { // generate id if not already provided final String id = indexRequest.id(); if (id == null) { - if (creationVersion.onOrAfter(IndexVersions.TIME_BASED_K_ORDERED_DOC_ID) && indexMode == IndexMode.LOGSDB) { + if (shouldUseTimeBasedId(indexMode, creationVersion)) { indexRequest.autoGenerateTimeBasedId(); } else { indexRequest.autoGenerateId(); @@ -177,6 +177,15 @@ public void process(IndexRequest indexRequest) { } } + private static boolean shouldUseTimeBasedId(final IndexMode indexMode, final IndexVersion creationVersion) { + return indexMode == IndexMode.LOGSDB && isNewIndexVersion(creationVersion); + } + + private static boolean isNewIndexVersion(final IndexVersion creationVersion) { + return creationVersion.between(IndexVersions.TIME_BASED_K_ORDERED_DOC_ID_BACKPORT, IndexVersions.UPGRADE_TO_LUCENE_10_0_0) + || creationVersion.onOrAfter(IndexVersions.TIME_BASED_K_ORDERED_DOC_ID); + } + @Override public int indexShard( String id, diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index 7cb0e457e36c7..bcacf21fcedbf 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -241,7 +241,7 @@ public boolean readyForSearch(ClusterState clusterState) { boolean found = false; for (int idx = 0; idx < shardRoutingTable.size(); idx++) { ShardRouting shardRouting = shardRoutingTable.shard(idx); - if (shardRouting.active() && OperationRouting.canSearchShard(shardRouting, clusterState)) { + if (shardRouting.active() && shardRouting.isSearchable()) { found = true; break; } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index f7812d284f2af..5e2dbf1c5df5d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -32,8 +32,6 @@ import java.util.Set; import java.util.stream.Collectors; -import static org.elasticsearch.index.IndexSettings.INDEX_FAST_REFRESH_SETTING; - public class OperationRouting { public static final Setting USE_ADAPTIVE_REPLICA_SELECTION_SETTING = Setting.boolSetting( @@ -150,7 +148,7 @@ private static List statefulShardsThatHandleSearches(ShardIterator } private static List statelessShardsThatHandleSearches(ClusterState clusterState, ShardIterator iterator) { - return 
iterator.getShardRoutings().stream().filter(shardRouting -> canSearchShard(shardRouting, clusterState)).toList(); + return iterator.getShardRoutings().stream().filter(ShardRouting::isSearchable).toList(); } public static ShardIterator getShards(ClusterState clusterState, ShardId shardId) { @@ -303,12 +301,4 @@ public ShardId shardId(ClusterState clusterState, String index, String id, @Null IndexMetadata indexMetadata = indexMetadata(clusterState, index); return new ShardId(indexMetadata.getIndex(), IndexRouting.fromIndexMetadata(indexMetadata).getShard(id, routing)); } - - public static boolean canSearchShard(ShardRouting shardRouting, ClusterState clusterState) { - if (INDEX_FAST_REFRESH_SETTING.get(clusterState.metadata().index(shardRouting.index()).getSettings())) { - return shardRouting.isPromotableToPrimary(); - } else { - return shardRouting.isSearchable(); - } - } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index 319786b558ddd..157d28e61057c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -935,8 +935,7 @@ public boolean isPromotableToPrimary() { } /** - * Determine if role searchable. Consumers should prefer {@link OperationRouting#canSearchShard(ShardRouting, ClusterState)} to - * determine if a shard can be searched and {@link IndexRoutingTable#readyForSearch(ClusterState)} to determine if an index + * Determine if role searchable. Consumers should prefer {@link IndexRoutingTable#readyForSearch(ClusterState)} to determine if an index * is ready to be searched. */ public boolean isSearchable() { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java index 0c82faaaeaa45..b98e9050d2b4a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java @@ -17,6 +17,7 @@ import java.util.Map; import java.util.function.Supplier; +import java.util.stream.Collectors; public class AllocationStatsService { private final ClusterService clusterService; @@ -39,6 +40,26 @@ public AllocationStatsService( } public Map stats() { - return nodeAllocationStatsProvider.stats(clusterService.state(), clusterInfoService.getClusterInfo(), desiredBalanceSupplier.get()); + var state = clusterService.state(); + var stats = nodeAllocationStatsProvider.stats( + state.metadata(), + state.getRoutingNodes(), + clusterInfoService.getClusterInfo(), + desiredBalanceSupplier.get() + ); + return stats.entrySet() + .stream() + .collect( + Collectors.toMap( + Map.Entry::getKey, + e -> new NodeAllocationStats( + e.getValue().shards(), + e.getValue().undesiredShards(), + e.getValue().forecastedIngestLoad(), + e.getValue().forecastedDiskUsage(), + e.getValue().currentDiskUsage() + ) + ) + ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsProvider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsProvider.java index 157b409be14d3..8368f5916ef91 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsProvider.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsProvider.java @@ -10,11 +10,15 @@ package org.elasticsearch.cluster.routing.allocation; import org.elasticsearch.cluster.ClusterInfo; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalance; +import org.elasticsearch.cluster.routing.allocation.allocator.WeightFunction; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.Nullable; @@ -23,17 +27,47 @@ public class NodeAllocationStatsProvider { private final WriteLoadForecaster writeLoadForecaster; - public NodeAllocationStatsProvider(WriteLoadForecaster writeLoadForecaster) { + private volatile float indexBalanceFactor; + private volatile float shardBalanceFactor; + private volatile float writeLoadBalanceFactor; + private volatile float diskUsageBalanceFactor; + + public record NodeAllocationAndClusterBalanceStats( + int shards, + int undesiredShards, + double forecastedIngestLoad, + long forecastedDiskUsage, + long currentDiskUsage, + float currentNodeWeight + ) {} + + public NodeAllocationStatsProvider(WriteLoadForecaster writeLoadForecaster, ClusterSettings clusterSettings) { this.writeLoadForecaster = writeLoadForecaster; + clusterSettings.initializeAndWatch(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, value -> this.shardBalanceFactor = value); + clusterSettings.initializeAndWatch(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, value -> this.indexBalanceFactor = value); + clusterSettings.initializeAndWatch( + BalancedShardsAllocator.WRITE_LOAD_BALANCE_FACTOR_SETTING, + value -> this.writeLoadBalanceFactor = value + ); + clusterSettings.initializeAndWatch( + BalancedShardsAllocator.DISK_USAGE_BALANCE_FACTOR_SETTING, + value -> this.diskUsageBalanceFactor = value + ); } - public Map stats( - ClusterState clusterState, + public Map stats( + Metadata metadata, + RoutingNodes routingNodes, ClusterInfo clusterInfo, @Nullable DesiredBalance desiredBalance ) { - var stats = Maps.newMapWithExpectedSize(clusterState.getRoutingNodes().size()); - for (RoutingNode node : clusterState.getRoutingNodes()) { + var weightFunction = new WeightFunction(shardBalanceFactor, indexBalanceFactor, writeLoadBalanceFactor, diskUsageBalanceFactor); + var avgShardsPerNode = WeightFunction.avgShardPerNode(metadata, routingNodes); + var avgWriteLoadPerNode = WeightFunction.avgWriteLoadPerNode(writeLoadForecaster, metadata, routingNodes); + var avgDiskUsageInBytesPerNode = WeightFunction.avgDiskUsageInBytesPerNode(clusterInfo, metadata, routingNodes); + + var stats = Maps.newMapWithExpectedSize(routingNodes.size()); + for (RoutingNode node : routingNodes) { int shards = 0; int undesiredShards = 0; double forecastedWriteLoad = 0.0; @@ -44,7 +78,7 @@ public Map stats( continue; } shards++; - IndexMetadata indexMetadata = clusterState.metadata().getIndexSafe(shardRouting.index()); + IndexMetadata indexMetadata = metadata.getIndexSafe(shardRouting.index()); if (isDesiredAllocation(desiredBalance, shardRouting) == false) { undesiredShards++; } @@ -54,14 +88,23 @@ public 
Map stats( currentDiskUsage += shardSize; } + float currentNodeWeight = weightFunction.nodeWeight( + shards, + avgShardsPerNode, + forecastedWriteLoad, + avgWriteLoadPerNode, + currentDiskUsage, + avgDiskUsageInBytesPerNode + ); stats.put( node.nodeId(), - new NodeAllocationStats( + new NodeAllocationAndClusterBalanceStats( shards, desiredBalance != null ? undesiredShards : -1, forecastedWriteLoad, forecastedDiskUsage, - currentDiskUsage + currentDiskUsage, + currentNodeWeight ) ); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 5b8fb0c7e9203..8dd1f14564ce9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -168,14 +168,17 @@ private void collectAndRecordNodeWeightStats(Balancer balancer, WeightFunction w Map nodeLevelWeights = new HashMap<>(); for (var entry : balancer.nodes.entrySet()) { var node = entry.getValue(); + var nodeWeight = weightFunction.nodeWeight( + node.numShards(), + balancer.avgShardsPerNode(), + node.writeLoad(), + balancer.avgWriteLoadPerNode(), + node.diskUsageInBytes(), + balancer.avgDiskUsageInBytesPerNode() + ); nodeLevelWeights.put( node.routingNode.node(), - new DesiredBalanceMetrics.NodeWeightStats( - node.numShards(), - node.diskUsageInBytes(), - node.writeLoad(), - weightFunction.nodeWeight(balancer, node) - ) + new DesiredBalanceMetrics.NodeWeightStats(node.numShards(), node.diskUsageInBytes(), node.writeLoad(), nodeWeight) ); } allocation.routingNodes().setBalanceWeightStatsPerNode(nodeLevelWeights); @@ -252,65 +255,6 @@ public float getShardBalance() { return shardBalanceFactor; } - /** - * This class is the primary weight function used to create balanced over nodes and shards in the cluster. - * Currently this function has 3 properties: - *
<ul> - * <li><code>index balance</code> - balance property over shards per index</li> - * <li><code>shard balance</code> - balance property over shards per cluster</li> - * </ul> - * <p>
- * Each of these properties are expressed as factor such that the properties factor defines the relative - * importance of the property for the weight function. For example if the weight function should calculate - * the weights only based on a global (shard) balance the index balance can be set to {@code 0.0} and will - * in turn have no effect on the distribution. - * </p>
- * The weight per index is calculated based on the following formula: - * <ul> - * <li> - * <code>weight<sub>index</sub>(node, index) = indexBalance * (node.numShards(index) - avgShardsPerNode(index))</code> - * </li> - * <li> - * <code>weight<sub>node</sub>(node, index) = shardBalance * (node.numShards() - avgShardsPerNode)</code> - * </li> - * </ul>
- * <code>weight(node, index) = weight<sub>index</sub>(node, index) + weight<sub>node</sub>(node, index)</code> - */ - private static class WeightFunction { - - private final float theta0; - private final float theta1; - private final float theta2; - private final float theta3; - - WeightFunction(float shardBalance, float indexBalance, float writeLoadBalance, float diskUsageBalance) { - float sum = shardBalance + indexBalance + writeLoadBalance + diskUsageBalance; - if (sum <= 0.0f) { - throw new IllegalArgumentException("Balance factors must sum to a value > 0 but was: " + sum); - } - theta0 = shardBalance / sum; - theta1 = indexBalance / sum; - theta2 = writeLoadBalance / sum; - theta3 = diskUsageBalance / sum; - } - - float weight(Balancer balancer, ModelNode node, String index) { - final float weightIndex = node.numShards(index) - balancer.avgShardsPerNode(index); - return nodeWeight(balancer, node) + theta1 * weightIndex; - } - - float nodeWeight(Balancer balancer, ModelNode node) { - final float weightShard = node.numShards() - balancer.avgShardsPerNode(); - final float ingestLoad = (float) (node.writeLoad() - balancer.avgWriteLoadPerNode()); - final float diskUsage = (float) (node.diskUsageInBytes() - balancer.avgDiskUsageInBytesPerNode()); - return theta0 * weightShard + theta2 * ingestLoad + theta3 * diskUsage; - } - - float minWeightDelta(Balancer balancer, String index) { - return theta0 * 1 + theta1 * 1 + theta2 * balancer.getShardWriteLoad(index) + theta3 * balancer.maxShardSizeBytes(index); - } - } - /** * A {@link Balancer} */ @@ -335,63 +279,13 @@ private Balancer(WriteLoadForecaster writeLoadForecaster, RoutingAllocation allo this.metadata = allocation.metadata(); this.weight = weight; this.threshold = threshold; - avgShardsPerNode = ((float) metadata.getTotalNumberOfShards()) / routingNodes.size(); - avgWriteLoadPerNode = getTotalWriteLoad(writeLoadForecaster, metadata) / routingNodes.size(); - avgDiskUsageInBytesPerNode = ((double) getTotalDiskUsageInBytes(allocation.clusterInfo(), metadata) / routingNodes.size()); + avgShardsPerNode = WeightFunction.avgShardPerNode(metadata, routingNodes); + avgWriteLoadPerNode = WeightFunction.avgWriteLoadPerNode(writeLoadForecaster, metadata, routingNodes); + avgDiskUsageInBytesPerNode = WeightFunction.avgDiskUsageInBytesPerNode(allocation.clusterInfo(), metadata, routingNodes); nodes = Collections.unmodifiableMap(buildModelFromAssigned()); sorter = newNodeSorter(); } - - private static double getTotalWriteLoad(WriteLoadForecaster writeLoadForecaster, Metadata metadata) { - double writeLoad = 0.0; - for (IndexMetadata indexMetadata : metadata.indices().values()) { - writeLoad += getIndexWriteLoad(writeLoadForecaster, indexMetadata); - } - return writeLoad; - } - - private static double getIndexWriteLoad(WriteLoadForecaster writeLoadForecaster, IndexMetadata indexMetadata) { - var shardWriteLoad = writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0); - return shardWriteLoad * numberOfCopies(indexMetadata); - } - - private static long getTotalDiskUsageInBytes(ClusterInfo clusterInfo, Metadata metadata) { - long totalDiskUsageInBytes = 0; - for (IndexMetadata indexMetadata : metadata.indices().values()) { - totalDiskUsageInBytes += getIndexDiskUsageInBytes(clusterInfo, indexMetadata); - } - return totalDiskUsageInBytes; - } - - // Visible for testing - static long getIndexDiskUsageInBytes(ClusterInfo clusterInfo, IndexMetadata indexMetadata) { - if (indexMetadata.ignoreDiskWatermarks()) { - // disk watermarks are ignored for partial searchable snapshots -
// and is equivalent to indexMetadata.isPartialSearchableSnapshot() - return 0; - } - final long forecastedShardSize = indexMetadata.getForecastedShardSizeInBytes().orElse(-1L); - long totalSizeInBytes = 0; - int shardCount = 0; - for (int shard = 0; shard < indexMetadata.getNumberOfShards(); shard++) { - final ShardId shardId = new ShardId(indexMetadata.getIndex(), shard); - final long primaryShardSize = Math.max(forecastedShardSize, clusterInfo.getShardSize(shardId, true, -1L)); - if (primaryShardSize != -1L) { - totalSizeInBytes += primaryShardSize; - shardCount++; - } - final long replicaShardSize = Math.max(forecastedShardSize, clusterInfo.getShardSize(shardId, false, -1L)); - if (replicaShardSize != -1L) { - totalSizeInBytes += replicaShardSize * indexMetadata.getNumberOfReplicas(); - shardCount += indexMetadata.getNumberOfReplicas(); - } - } - if (shardCount == numberOfCopies(indexMetadata)) { - return totalSizeInBytes; - } - return shardCount == 0 ? 0 : (totalSizeInBytes / shardCount) * numberOfCopies(indexMetadata); - } - private static long getShardDiskUsageInBytes(ShardRouting shardRouting, IndexMetadata indexMetadata, ClusterInfo clusterInfo) { if (indexMetadata.ignoreDiskWatermarks()) { // disk watermarks are ignored for partial searchable snapshots @@ -401,10 +295,6 @@ private static long getShardDiskUsageInBytes(ShardRouting shardRouting, IndexMet return Math.max(indexMetadata.getForecastedShardSizeInBytes().orElse(0L), clusterInfo.getShardSize(shardRouting, 0L)); } - private static int numberOfCopies(IndexMetadata indexMetadata) { - return indexMetadata.getNumberOfShards() * (1 + indexMetadata.getNumberOfReplicas()); - } - private float getShardWriteLoad(String index) { return (float) writeLoadForecaster.getForecastedWriteLoad(metadata.index(index)).orElse(0.0); } @@ -1433,7 +1323,7 @@ public float weight(ModelNode node) { } public float minWeightDelta() { - return function.minWeightDelta(balancer, index); + return function.minWeightDelta(balancer.getShardWriteLoad(index), balancer.maxShardSizeBytes(index)); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalance.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalance.java index 9de95804b49b2..6ad44fdf3a9c0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalance.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalance.java @@ -21,7 +21,7 @@ * * @param assignments a set of the (persistent) node IDs to which each {@link ShardId} should be allocated * @param weightsPerNode The node weights calculated based on - * {@link org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.WeightFunction#nodeWeight} + * {@link org.elasticsearch.cluster.routing.allocation.allocator.WeightFunction#nodeWeight} */ public record DesiredBalance( long lastConvergedIndex, diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java index cf8840dc95724..9f6487bdc8abd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java @@ -10,7 +10,7 @@ package org.elasticsearch.cluster.routing.allocation.allocator; import 
org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider.NodeAllocationAndClusterBalanceStats; import org.elasticsearch.telemetry.metric.DoubleWithAttributes; import org.elasticsearch.telemetry.metric.LongWithAttributes; import org.elasticsearch.telemetry.metric.MeterRegistry; @@ -41,6 +41,7 @@ public record NodeWeightStats(long shardCount, double diskUsageInBytes, double w public static final String DESIRED_BALANCE_NODE_DISK_USAGE_METRIC_NAME = "es.allocator.desired_balance.allocations.node_disk_usage_bytes.current"; + public static final String CURRENT_NODE_WEIGHT_METRIC_NAME = "es.allocator.allocations.node.weight.current"; public static final String CURRENT_NODE_SHARD_COUNT_METRIC_NAME = "es.allocator.allocations.node.shard_count.current"; public static final String CURRENT_NODE_WRITE_LOAD_METRIC_NAME = "es.allocator.allocations.node.write_load.current"; public static final String CURRENT_NODE_DISK_USAGE_METRIC_NAME = "es.allocator.allocations.node.disk_usage_bytes.current"; @@ -68,12 +69,13 @@ public record NodeWeightStats(long shardCount, double diskUsageInBytes, double w private volatile long undesiredAllocations; private final AtomicReference> weightStatsPerNodeRef = new AtomicReference<>(Map.of()); - private final AtomicReference> allocationStatsPerNodeRef = new AtomicReference<>(Map.of()); + private final AtomicReference> allocationStatsPerNodeRef = + new AtomicReference<>(Map.of()); public void updateMetrics( AllocationStats allocationStats, Map weightStatsPerNode, - Map nodeAllocationStats + Map nodeAllocationStats ) { assert allocationStats != null : "allocation stats cannot be null"; assert weightStatsPerNode != null : "node balance weight stats cannot be null"; @@ -124,6 +126,12 @@ public DesiredBalanceMetrics(MeterRegistry meterRegistry) { "bytes", this::getDesiredBalanceNodeDiskUsageMetrics ); + meterRegistry.registerDoublesGauge( + CURRENT_NODE_WEIGHT_METRIC_NAME, + "The weight of nodes based on the current allocation state", + "unit", + this::getCurrentNodeWeightMetrics + ); meterRegistry.registerLongsGauge( DESIRED_BALANCE_NODE_SHARD_COUNT_METRIC_NAME, "Shard count of nodes in the computed desired balance", @@ -291,6 +299,18 @@ private List getCurrentNodeUndesiredShardCountMetrics() { return values; } + private List getCurrentNodeWeightMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = allocationStatsPerNodeRef.get(); + List doubles = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + doubles.add(new DoubleWithAttributes(stats.get(node).currentNodeWeight(), getNodeAttributes(node))); + } + return doubles; + } + private Map getNodeAttributes(DiscoveryNode node) { return Map.of("node_id", node.getId(), "node_name", node.getName()); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index 5ad29debc8f20..2ee905634f760 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -20,8 +20,8 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import 
org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; -import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider.NodeAllocationAndClusterBalanceStats; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceMetrics.AllocationStats; import org.elasticsearch.cluster.routing.allocation.decider.Decision; @@ -159,8 +159,13 @@ void run() { } private void updateDesireBalanceMetrics(AllocationStats allocationStats) { - var stats = nodeAllocationStatsProvider.stats(allocation.getClusterState(), allocation.clusterInfo(), desiredBalance); - Map nodeAllocationStats = new HashMap<>(stats.size()); + var stats = nodeAllocationStatsProvider.stats( + allocation.metadata(), + allocation.routingNodes(), + allocation.clusterInfo(), + desiredBalance + ); + Map nodeAllocationStats = new HashMap<>(stats.size()); for (var entry : stats.entrySet()) { var node = allocation.nodes().get(entry.getKey()); if (node != null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java index bfe8a20f18043..72261df658ca1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java @@ -131,6 +131,10 @@ protected void processInput(DesiredBalanceInput desiredBalanceInput) { recordTime( cumulativeComputationTime, + // We set currentDesiredBalance back to INITIAL when the node stands down as master in onNoLongerMaster. + // However, it is possible that we revert the effect here by setting it again since the computation is async + // and does not check whether the node is master. This should have little to no practical impact. But it may + // lead to unexpected behaviours for tests. See also https://github.com/elastic/elasticsearch/pull/116904 () -> setCurrentDesiredBalance( desiredBalanceComputer.compute( getInitialDesiredBalance(), @@ -213,6 +217,10 @@ public void allocate(RoutingAllocation allocation, ActionListener listener queue.add(index, listener); desiredBalanceComputation.onNewInput(DesiredBalanceInput.create(index, allocation)); + if (allocation.routingTable().indicesRouting().isEmpty()) { + logger.debug("No eager reconciliation needed for empty routing table"); + return; + } // Starts reconciliation towards desired balance that might have not been updated with a recent calculation yet. // This is fine as balance should have incremental rather than radical changes. // This should speed up achieving the desired balance in cases current state is still different from it (due to THROTTLING). diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/WeightFunction.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/WeightFunction.java new file mode 100644 index 0000000000000..7203a92b147f6 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/WeightFunction.java @@ -0,0 +1,157 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.cluster.routing.allocation.allocator; + +import org.elasticsearch.cluster.ClusterInfo; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.routing.RoutingNodes; +import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; +import org.elasticsearch.index.shard.ShardId; + +/** + * This class is the primary weight function used to create balanced over nodes and shards in the cluster. + * Currently this function has 3 properties: + *
<ul> + * <li><code>index balance</code> - balance property over shards per index</li> + * <li><code>shard balance</code> - balance property over shards per cluster</li> + * </ul> + * <p>
+ * Each of these properties are expressed as factor such that the properties factor defines the relative + * importance of the property for the weight function. For example if the weight function should calculate + * the weights only based on a global (shard) balance the index balance can be set to {@code 0.0} and will + * in turn have no effect on the distribution. + * </p>
+ * The weight per index is calculated based on the following formula: + * <ul> + * <li> + * <code>weight<sub>index</sub>(node, index) = indexBalance * (node.numShards(index) - avgShardsPerNode(index))</code> + * </li> + * <li> + * <code>weight<sub>node</sub>(node, index) = shardBalance * (node.numShards() - avgShardsPerNode)</code> + * </li> + * </ul>
+ * <code>weight(node, index) = weight<sub>index</sub>(node, index) + weight<sub>node</sub>(node, index)</code> + */ +public class WeightFunction { + + private final float theta0; + private final float theta1; + private final float theta2; + private final float theta3; + + public WeightFunction(float shardBalance, float indexBalance, float writeLoadBalance, float diskUsageBalance) { + float sum = shardBalance + indexBalance + writeLoadBalance + diskUsageBalance; + if (sum <= 0.0f) { + throw new IllegalArgumentException("Balance factors must sum to a value > 0 but was: " + sum); + } + theta0 = shardBalance / sum; + theta1 = indexBalance / sum; + theta2 = writeLoadBalance / sum; + theta3 = diskUsageBalance / sum; + } + + float weight(BalancedShardsAllocator.Balancer balancer, BalancedShardsAllocator.ModelNode node, String index) { + final float weightIndex = node.numShards(index) - balancer.avgShardsPerNode(index); + final float nodeWeight = nodeWeight( + node.numShards(), + balancer.avgShardsPerNode(), + node.writeLoad(), + balancer.avgWriteLoadPerNode(), + node.diskUsageInBytes(), + balancer.avgDiskUsageInBytesPerNode() + ); + return nodeWeight + theta1 * weightIndex; + } + + public float nodeWeight( + int nodeNumShards, + float avgShardsPerNode, + double nodeWriteLoad, + double avgWriteLoadPerNode, + double diskUsageInBytes, + double avgDiskUsageInBytesPerNode + ) { + final float weightShard = nodeNumShards - avgShardsPerNode; + final float ingestLoad = (float) (nodeWriteLoad - avgWriteLoadPerNode); + final float diskUsage = (float) (diskUsageInBytes - avgDiskUsageInBytesPerNode); + return theta0 * weightShard + theta2 * ingestLoad + theta3 * diskUsage; + } + + float minWeightDelta(float shardWriteLoad, float shardSizeBytes) { + return theta0 * 1 + theta1 * 1 + theta2 * shardWriteLoad + theta3 * shardSizeBytes; + } + + public static float avgShardPerNode(Metadata metadata, RoutingNodes routingNodes) { + return ((float) metadata.getTotalNumberOfShards()) / routingNodes.size(); + } + + public static double avgWriteLoadPerNode(WriteLoadForecaster writeLoadForecaster, Metadata metadata, RoutingNodes routingNodes) { + return getTotalWriteLoad(writeLoadForecaster, metadata) / routingNodes.size(); + } + + public static double avgDiskUsageInBytesPerNode(ClusterInfo clusterInfo, Metadata metadata, RoutingNodes routingNodes) { + return ((double) getTotalDiskUsageInBytes(clusterInfo, metadata) / routingNodes.size()); + } + + private static double getTotalWriteLoad(WriteLoadForecaster writeLoadForecaster, Metadata metadata) { + double writeLoad = 0.0; + for (IndexMetadata indexMetadata : metadata.indices().values()) { + writeLoad += getIndexWriteLoad(writeLoadForecaster, indexMetadata); + } + return writeLoad; + } + + private static double getIndexWriteLoad(WriteLoadForecaster writeLoadForecaster, IndexMetadata indexMetadata) { + var shardWriteLoad = writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0); + return shardWriteLoad * numberOfCopies(indexMetadata); + } + + private static int numberOfCopies(IndexMetadata indexMetadata) { + return indexMetadata.getNumberOfShards() * (1 + indexMetadata.getNumberOfReplicas()); + } + + private static long getTotalDiskUsageInBytes(ClusterInfo clusterInfo, Metadata metadata) { + long totalDiskUsageInBytes = 0; + for (IndexMetadata indexMetadata : metadata.indices().values()) { + totalDiskUsageInBytes += getIndexDiskUsageInBytes(clusterInfo, indexMetadata); + } + return totalDiskUsageInBytes; + } + + // Visible for testing + static long getIndexDiskUsageInBytes(ClusterInfo
clusterInfo, IndexMetadata indexMetadata) { + if (indexMetadata.ignoreDiskWatermarks()) { + // disk watermarks are ignored for partial searchable snapshots + // and is equivalent to indexMetadata.isPartialSearchableSnapshot() + return 0; + } + final long forecastedShardSize = indexMetadata.getForecastedShardSizeInBytes().orElse(-1L); + long totalSizeInBytes = 0; + int shardCount = 0; + for (int shard = 0; shard < indexMetadata.getNumberOfShards(); shard++) { + final ShardId shardId = new ShardId(indexMetadata.getIndex(), shard); + final long primaryShardSize = Math.max(forecastedShardSize, clusterInfo.getShardSize(shardId, true, -1L)); + if (primaryShardSize != -1L) { + totalSizeInBytes += primaryShardSize; + shardCount++; + } + final long replicaShardSize = Math.max(forecastedShardSize, clusterInfo.getShardSize(shardId, false, -1L)); + if (replicaShardSize != -1L) { + totalSizeInBytes += replicaShardSize * indexMetadata.getNumberOfReplicas(); + shardCount += indexMetadata.getNumberOfReplicas(); + } + } + if (shardCount == numberOfCopies(indexMetadata)) { + return totalSizeInBytes; + } + return shardCount == 0 ? 0 : (totalSizeInBytes / shardCount) * numberOfCopies(indexMetadata); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/service/TransportFeatures.java b/server/src/main/java/org/elasticsearch/cluster/service/TransportFeatures.java deleted file mode 100644 index 6e0a8afd6cf8e..0000000000000 --- a/server/src/main/java/org/elasticsearch/cluster/service/TransportFeatures.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.cluster.service; - -import org.elasticsearch.Version; -import org.elasticsearch.features.FeatureSpecification; -import org.elasticsearch.features.NodeFeature; - -import java.util.Map; - -public class TransportFeatures implements FeatureSpecification { - @Override - public Map getHistoricalFeatures() { - // transport version was introduced in 8.8.0, but we need to wait until all nodes are >8.8.0 - // to properly detect when we need to fix transport versions - return Map.of(TransportVersionsFixupListener.FIX_TRANSPORT_VERSION, Version.V_8_8_1); - } -} diff --git a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java deleted file mode 100644 index 0ae0f8b10aed7..0000000000000 --- a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
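The extracted `WeightFunction` whose diff closes just above is easiest to read with numbers plugged in. A worked sketch of `nodeWeight`, using illustrative balance factors (not values taken from this patch):

```java
import org.elasticsearch.cluster.routing.allocation.allocator.WeightFunction;

public class WeightSketch {
    public static void main(String[] args) {
        // Factors: shard=0.45, index=0.55, writeLoad=0, disk=0; sum=1.0, so after
        // normalization theta0=0.45, theta1=0.55, theta2=theta3=0.
        WeightFunction wf = new WeightFunction(0.45f, 0.55f, 0.0f, 0.0f);
        // A node holding 12 shards in a cluster averaging 10 shards per node, with
        // write load and disk usage exactly at the cluster average:
        float w = wf.nodeWeight(12, 10.0f, 5.0, 5.0, 1_000_000.0, 1_000_000.0);
        System.out.println(w); // 0.45 * (12 - 10) = 0.9 -> heavier than average
    }
}
```

This per-node value is also what `DesiredBalanceMetrics` now exports through the new `es.allocator.allocations.node.weight.current` gauge.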
- */ - -package org.elasticsearch.cluster.service; - -import org.elasticsearch.TransportVersion; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.FailedNodeException; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.client.internal.ClusterAdminClient; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.ClusterStateTaskExecutor; -import org.elasticsearch.cluster.ClusterStateTaskListener; -import org.elasticsearch.cluster.version.CompatibilityVersions; -import org.elasticsearch.common.Priority; -import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.UpdateForV9; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.features.NodeFeature; -import org.elasticsearch.logging.LogManager; -import org.elasticsearch.logging.Logger; -import org.elasticsearch.threadpool.Scheduler; -import org.elasticsearch.threadpool.ThreadPool; - -import java.util.Collections; -import java.util.HashSet; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.Executor; -import java.util.stream.Collectors; - -import static org.elasticsearch.cluster.ClusterState.INFERRED_TRANSPORT_VERSION; - -/** - * This fixes up the transport version from pre-8.8.0 cluster state that was inferred as the minimum possible, - * due to the master node not understanding cluster state with transport versions added in 8.8.0. - * Any nodes with the inferred placeholder cluster state is then refreshed with their actual transport version - */ -@UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // this can be removed in v9 -public class TransportVersionsFixupListener implements ClusterStateListener { - - private static final Logger logger = LogManager.getLogger(TransportVersionsFixupListener.class); - - static final NodeFeature FIX_TRANSPORT_VERSION = new NodeFeature("transport.fix_transport_version"); - - private static final TimeValue RETRY_TIME = TimeValue.timeValueSeconds(30); - - private final MasterServiceTaskQueue taskQueue; - private final ClusterAdminClient client; - private final Scheduler scheduler; - private final Executor executor; - private final Set pendingNodes = Collections.synchronizedSet(new HashSet<>()); - private final FeatureService featureService; - - public TransportVersionsFixupListener( - ClusterService service, - ClusterAdminClient client, - FeatureService featureService, - ThreadPool threadPool - ) { - // there tends to be a lot of state operations on an upgrade - this one is not time-critical, - // so use LOW priority. It just needs to be run at some point after upgrade. 
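The LOW-priority queue described in the comment above follows the standard master-service task-queue pattern. A condensed sketch of the two queue interactions from the deleted listener, using its own names (`service` is the surrounding `ClusterService`; `results` and `retryNum` are assumed in scope):

```java
// Create the queue once: the executor folds every queued task into a single
// cluster-state update when the batch runs.
MasterServiceTaskQueue<NodeTransportVersionTask> taskQueue = service.createTaskQueue(
    "fixup-transport-versions",
    Priority.LOW, // not time-critical; just needs to run at some point after upgrade
    new TransportVersionUpdater()
);

// Each successful NodesInfo response batch becomes one task on the queue.
taskQueue.submitTask("update-transport-version", new NodeTransportVersionTask(results, retryNum), null);
```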
- this( - service.createTaskQueue("fixup-transport-versions", Priority.LOW, new TransportVersionUpdater()), - client, - featureService, - threadPool, - threadPool.executor(ThreadPool.Names.CLUSTER_COORDINATION) - ); - } - - TransportVersionsFixupListener( - MasterServiceTaskQueue taskQueue, - ClusterAdminClient client, - FeatureService featureService, - Scheduler scheduler, - Executor executor - ) { - this.taskQueue = taskQueue; - this.client = client; - this.featureService = featureService; - this.scheduler = scheduler; - this.executor = executor; - } - - class NodeTransportVersionTask implements ClusterStateTaskListener { - private final Map results; - private final int retryNum; - - NodeTransportVersionTask(Map results, int retryNum) { - this.results = results; - this.retryNum = retryNum; - } - - @Override - public void onFailure(Exception e) { - logger.error("Could not apply transport version for nodes {} to cluster state", results.keySet(), e); - scheduleRetry(results.keySet(), retryNum); - } - - public Map results() { - return results; - } - } - - private static class TransportVersionUpdater implements ClusterStateTaskExecutor { - @Override - public ClusterState execute(BatchExecutionContext context) throws Exception { - ClusterState.Builder builder = ClusterState.builder(context.initialState()); - boolean modified = false; - for (var c : context.taskContexts()) { - for (var e : c.getTask().results().entrySet()) { - // this node's transport version might have been updated already/node has gone away - var cvMap = builder.compatibilityVersions(); - TransportVersion recordedTv = Optional.ofNullable(cvMap.get(e.getKey())) - .map(CompatibilityVersions::transportVersion) - .orElse(null); - assert (recordedTv != null) || (context.initialState().nodes().nodeExists(e.getKey()) == false) - : "Node " + e.getKey() + " is in the cluster but does not have an associated transport version recorded"; - if (Objects.equals(recordedTv, INFERRED_TRANSPORT_VERSION)) { - builder.putCompatibilityVersions(e.getKey(), e.getValue(), Map.of()); // unknown mappings versions - modified = true; - } - } - c.success(() -> {}); - } - return modified ? 
builder.build() : context.initialState(); - } - } - - @SuppressForbidden(reason = "maintaining ClusterState#compatibilityVersions requires reading them") - private static Map getCompatibilityVersions(ClusterState clusterState) { - return clusterState.compatibilityVersions(); - } - - @Override - public void clusterChanged(ClusterChangedEvent event) { - if (event.localNodeMaster() == false) return; // only if we're master - - // if the min node version > 8.8.0, and the cluster state has some transport versions == 8.8.0, - // then refresh all inferred transport versions to their real versions - // now that everything should understand cluster state with transport versions - if (featureService.clusterHasFeature(event.state(), FIX_TRANSPORT_VERSION) - && event.state().getMinTransportVersion().equals(INFERRED_TRANSPORT_VERSION)) { - - // find all the relevant nodes - Set nodes = getCompatibilityVersions(event.state()).entrySet() - .stream() - .filter(e -> e.getValue().transportVersion().equals(INFERRED_TRANSPORT_VERSION)) - .map(Map.Entry::getKey) - .collect(Collectors.toSet()); - - updateTransportVersions(nodes, 0); - } - } - - private void scheduleRetry(Set nodes, int thisRetryNum) { - // just keep retrying until this succeeds - logger.debug("Scheduling retry {} for nodes {}", thisRetryNum + 1, nodes); - scheduler.schedule(() -> updateTransportVersions(nodes, thisRetryNum + 1), RETRY_TIME, executor); - } - - private void updateTransportVersions(Set nodes, int retryNum) { - // some might already be in-progress - Set outstandingNodes = Sets.newHashSetWithExpectedSize(nodes.size()); - synchronized (pendingNodes) { - for (String n : nodes) { - if (pendingNodes.add(n)) { - outstandingNodes.add(n); - } - } - } - if (outstandingNodes.isEmpty()) { - // all nodes already have in-progress requests - return; - } - - NodesInfoRequest request = new NodesInfoRequest(outstandingNodes.toArray(String[]::new)); - request.clear(); // only requesting base data - client.nodesInfo(request, new ActionListener<>() { - @Override - public void onResponse(NodesInfoResponse response) { - pendingNodes.removeAll(outstandingNodes); - handleResponse(response, retryNum); - } - - @Override - public void onFailure(Exception e) { - pendingNodes.removeAll(outstandingNodes); - logger.warn("Could not read transport versions for nodes {}", outstandingNodes, e); - scheduleRetry(outstandingNodes, retryNum); - } - }); - } - - private void handleResponse(NodesInfoResponse response, int retryNum) { - if (response.hasFailures()) { - Set failedNodes = new HashSet<>(); - for (FailedNodeException fne : response.failures()) { - logger.warn("Failed to read transport version info from node {}", fne.nodeId(), fne); - failedNodes.add(fne.nodeId()); - } - scheduleRetry(failedNodes, retryNum); - } - // carry on and read what we can - - Map results = response.getNodes() - .stream() - .collect(Collectors.toUnmodifiableMap(n -> n.getNode().getId(), NodeInfo::getTransportVersion)); - - if (results.isEmpty() == false) { - taskQueue.submitTask("update-transport-version", new NodeTransportVersionTask(results, retryNum), null); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java index 926056fec3ec8..c0fe0bc32fb08 100644 --- a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java +++ b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java @@ -82,6 +82,7 @@ public enum ReferenceDocs { CIRCUIT_BREAKER_ERRORS, 
ALLOCATION_EXPLAIN_NO_COPIES, ALLOCATION_EXPLAIN_MAX_RETRY, + SECURE_SETTINGS, // this comment keeps the ';' on the next line so every entry above has a trailing ',' which makes the diff for adding new links cleaner ; diff --git a/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java b/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java index 9c97cb8fe7e85..7ea58ee326a79 100644 --- a/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java +++ b/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java @@ -10,7 +10,7 @@ package org.elasticsearch.common; import java.nio.ByteBuffer; -import java.util.Base64; +import java.util.function.Supplier; /** * Generates a base64-encoded, k-ordered UUID string optimized for compression and efficient indexing. @@ -28,18 +28,25 @@ * The result is a compact base64-encoded string, optimized for efficient compression of the _id field in an inverted index. */ public class TimeBasedKOrderedUUIDGenerator extends TimeBasedUUIDGenerator { - private static final Base64.Encoder BASE_64_NO_PADDING = Base64.getEncoder().withoutPadding(); + + public TimeBasedKOrderedUUIDGenerator( + final Supplier timestampSupplier, + final Supplier sequenceIdSupplier, + final Supplier macAddressSupplier + ) { + super(timestampSupplier, sequenceIdSupplier, macAddressSupplier); + } @Override public String getBase64UUID() { - final int sequenceId = this.sequenceNumber.incrementAndGet() & 0x00FF_FFFF; + final int sequenceId = sequenceNumber.incrementAndGet() & 0x00FF_FFFF; // Calculate timestamp to ensure ordering and avoid backward movement in case of time shifts. // Uses AtomicLong to guarantee that timestamp increases even if the system clock moves backward. // If the sequenceId overflows (reaches 0 within the same millisecond), the timestamp is incremented // to ensure strict ordering. long timestamp = this.lastTimestamp.accumulateAndGet( - currentTimeMillis(), + timestampSupplier.get(), sequenceId == 0 ? (lastTimestamp, currentTimeMillis) -> Math.max(lastTimestamp, currentTimeMillis) + 1 : Math::max ); @@ -68,6 +75,6 @@ public String getBase64UUID() { assert buffer.position() == uuidBytes.length; - return BASE_64_NO_PADDING.encodeToString(uuidBytes); + return Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString(uuidBytes); } } diff --git a/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java b/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java index 2ed979ae66ffa..9da878fd4af64 100644 --- a/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java +++ b/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java @@ -11,6 +11,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Supplier; /** * These are essentially flake ids but we use 6 (not 8) bytes for timestamp, and use 3 (not 2) bytes for sequence number. We also reorder @@ -19,15 +20,14 @@ * For more information about flake ids, check out * https://archive.fo/2015.07.08-082503/http://www.boundary.com/blog/2012/01/flake-a-decentralized-k-ordered-unique-id-generator-in-erlang/ */ - class TimeBasedUUIDGenerator implements UUIDGenerator { // We only use bottom 3 bytes for the sequence number. 
Paranoia: init with random int so that if JVM/OS/machine goes down, clock slips // backwards, and JVM comes back up, we are less likely to be on the same sequenceNumber at the same time: - protected final AtomicInteger sequenceNumber = new AtomicInteger(SecureRandomHolder.INSTANCE.nextInt()); + protected final AtomicInteger sequenceNumber; + protected final AtomicLong lastTimestamp; - // Used to ensure clock moves forward: - protected final AtomicLong lastTimestamp = new AtomicLong(0); + protected final Supplier timestampSupplier; private static final byte[] SECURE_MUNGED_ADDRESS = MacAddressProvider.getSecureMungedAddress(); @@ -35,18 +35,26 @@ class TimeBasedUUIDGenerator implements UUIDGenerator { assert SECURE_MUNGED_ADDRESS.length == 6; } - // protected for testing - protected long currentTimeMillis() { - return System.currentTimeMillis(); + static final int SIZE_IN_BYTES = 15; + private final byte[] macAddress; + + TimeBasedUUIDGenerator( + final Supplier timestampSupplier, + final Supplier sequenceIdSupplier, + final Supplier macAddressSupplier + ) { + this.timestampSupplier = timestampSupplier; + // NOTE: getting the mac address every time using the supplier is expensive, hence we cache it. + this.macAddress = macAddressSupplier.get(); + this.sequenceNumber = new AtomicInteger(sequenceIdSupplier.get()); + // Used to ensure clock moves forward: + this.lastTimestamp = new AtomicLong(0); } - // protected for testing protected byte[] macAddress() { - return SECURE_MUNGED_ADDRESS; + return macAddress; } - static final int SIZE_IN_BYTES = 15; - @Override public String getBase64UUID() { final int sequenceId = sequenceNumber.incrementAndGet() & 0xffffff; @@ -55,7 +63,7 @@ public String getBase64UUID() { // still vulnerable if we are shut down, clock goes backwards, and we restart... for this we // randomize the sequenceNumber on init to decrease chance of collision: long timestamp = this.lastTimestamp.accumulateAndGet( - currentTimeMillis(), + timestampSupplier.get(), // Always force the clock to increment whenever sequence number is 0, in case we have a long // time-slip backwards: sequenceId == 0 ? (lastTimestamp, currentTimeMillis) -> Math.max(lastTimestamp, currentTimeMillis) + 1 : Math::max diff --git a/server/src/main/java/org/elasticsearch/common/UUIDs.java b/server/src/main/java/org/elasticsearch/common/UUIDs.java index 0f73b8172c10f..ebcb375bc01bc 100644 --- a/server/src/main/java/org/elasticsearch/common/UUIDs.java +++ b/server/src/main/java/org/elasticsearch/common/UUIDs.java @@ -12,13 +12,29 @@ import org.elasticsearch.common.settings.SecureString; import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; +/** + * Utility class for generating various types of UUIDs. 
+ */ public class UUIDs { + private static final AtomicInteger sequenceNumber = new AtomicInteger(SecureRandomHolder.INSTANCE.nextInt()); + public static final Supplier DEFAULT_TIMESTAMP_SUPPLIER = System::currentTimeMillis; + public static final Supplier DEFAULT_SEQUENCE_ID_SUPPLIER = sequenceNumber::incrementAndGet; + public static final Supplier DEFAULT_MAC_ADDRESS_SUPPLIER = MacAddressProvider::getSecureMungedAddress; + private static final UUIDGenerator RANDOM_UUID_GENERATOR = new RandomBasedUUIDGenerator(); + private static final UUIDGenerator TIME_BASED_K_ORDERED_GENERATOR = new TimeBasedKOrderedUUIDGenerator( + DEFAULT_TIMESTAMP_SUPPLIER, + DEFAULT_SEQUENCE_ID_SUPPLIER, + DEFAULT_MAC_ADDRESS_SUPPLIER + ); - private static final RandomBasedUUIDGenerator RANDOM_UUID_GENERATOR = new RandomBasedUUIDGenerator(); - - private static final UUIDGenerator TIME_BASED_K_ORDERED_GENERATOR = new TimeBasedKOrderedUUIDGenerator(); - private static final UUIDGenerator TIME_UUID_GENERATOR = new TimeBasedUUIDGenerator(); + private static final UUIDGenerator TIME_UUID_GENERATOR = new TimeBasedUUIDGenerator( + DEFAULT_TIMESTAMP_SUPPLIER, + DEFAULT_SEQUENCE_ID_SUPPLIER, + DEFAULT_MAC_ADDRESS_SUPPLIER + ); /** * The length of a UUID string generated by {@link #base64UUID}. diff --git a/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java b/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java index 36ca2df08724d..64fe57b3ea373 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SecureSetting.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.core.Booleans; +import org.elasticsearch.core.UpdateForV10; import java.io.InputStream; import java.security.GeneralSecurityException; @@ -26,6 +27,7 @@ public abstract class SecureSetting extends Setting { /** Determines whether legacy settings with sensitive values should be allowed. 
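The supplier-based constructors above exist so callers (and tests) can pin the clock, the sequence seed and the MAC address. A minimal sketch, assuming the `Supplier<Long>`, `Supplier<Integer>` and `Supplier<byte[]>` type parameters implied by the call sites:

```java
// Fixed suppliers make the generated IDs deterministic, which is what the
// new constructors enable; the MAC supplier must return 6 bytes.
Supplier<Long> fixedClock = () -> 1_700_000_000_000L;
Supplier<Integer> fixedSequenceSeed = () -> 0;
Supplier<byte[]> fixedMac = () -> new byte[] { 0, 1, 2, 3, 4, 5 };

UUIDGenerator generator = new TimeBasedKOrderedUUIDGenerator(fixedClock, fixedSequenceSeed, fixedMac);
String first = generator.getBase64UUID();
String second = generator.getBase64UUID();
// Both IDs share the timestamp-led prefix; only the trailing sequence bytes
// differ, so `second` sorts strictly after `first`.
```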
*/ + @UpdateForV10(owner = UpdateForV10.Owner.DISTRIBUTED_COORDINATION) // this should no longer be in use, even in v9, so can go away in v10 private static final boolean ALLOW_INSECURE_SETTINGS = Booleans.parseBoolean(System.getProperty("es.allow_insecure_settings", "false")); private static final Set<Property> ALLOWED_PROPERTIES = EnumSet.of( @@ -185,9 +187,7 @@ private InsecureStringSetting(String name) { @Override public SecureString get(Settings settings) { if (ALLOW_INSECURE_SETTINGS == false && exists(settings)) { - throw new IllegalArgumentException( - "Setting [" + name + "] is insecure, " + "but property [allow_insecure_settings] is not set" - ); + throw new IllegalArgumentException("Setting [" + name + "] is insecure, use the elasticsearch keystore instead"); } return super.get(settings); } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java index 9cfa22d0a3cfb..9464ccbcc7aa3 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.core.Tuple; import org.elasticsearch.plugins.internal.XContentParserDecorator; import org.elasticsearch.xcontent.DeprecationHandler; @@ -626,7 +627,22 @@ public static BytesReference toXContent(ChunkedToXContent toXContent, XContentTy */ public static BytesReference toXContent(ToXContent toXContent, XContentType xContentType, Params params, boolean humanReadable) throws IOException { - try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { + return toXContent(toXContent, xContentType, RestApiVersion.current(), params, humanReadable); + } + + /** + * Returns the bytes that represent the XContent output of the provided {@link ToXContent} object, using the provided + * {@link XContentType}. Wraps the output into a new anonymous object according to the value returned + * by the {@link ToXContent#isFragment()} method.
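A usage sketch for the new overload; the `RestApiVersion.V_8` target and the inline `ToXContent` lambda are illustrative, not taken from this patch, and the call sits inside code that may throw `IOException`:

```java
// Pin the REST API compatibility target instead of implicitly rendering with
// RestApiVersion.current().
ToXContent body = (builder, params) -> builder.field("acknowledged", true);
BytesReference bytes = XContentHelper.toXContent(
    body,
    XContentType.JSON,
    RestApiVersion.V_8,      // illustrative compatibility target
    ToXContent.EMPTY_PARAMS,
    false                    // humanReadable
);
// isFragment() defaults to true, so the output is wrapped: {"acknowledged":true}
```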
+ */ + public static BytesReference toXContent( + ToXContent toXContent, + XContentType xContentType, + RestApiVersion restApiVersion, + Params params, + boolean humanReadable + ) throws IOException { + try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent(), restApiVersion)) { builder.humanReadable(humanReadable); if (toXContent.isFragment()) { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/features/FeatureData.java b/server/src/main/java/org/elasticsearch/features/FeatureData.java index 991bb4d82be3d..65b95eae27e06 100644 --- a/server/src/main/java/org/elasticsearch/features/FeatureData.java +++ b/server/src/main/java/org/elasticsearch/features/FeatureData.java @@ -9,25 +9,19 @@ package org.elasticsearch.features; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.NavigableMap; import java.util.Set; -import java.util.TreeMap; - -import static org.elasticsearch.features.FeatureService.CLUSTER_FEATURES_ADDED_VERSION; /** - * Reads and consolidate features exposed by a list {@link FeatureSpecification}, grouping them into historical features and node - * features for the consumption of {@link FeatureService} + * Reads and consolidate features exposed by a list {@link FeatureSpecification}, + * grouping them together for the consumption of {@link FeatureService} */ public class FeatureData { @@ -40,19 +34,14 @@ public class FeatureData { } } - private final NavigableMap> historicalFeatures; private final Map nodeFeatures; - private FeatureData(NavigableMap> historicalFeatures, Map nodeFeatures) { - this.historicalFeatures = historicalFeatures; + private FeatureData(Map nodeFeatures) { this.nodeFeatures = nodeFeatures; } public static FeatureData createFromSpecifications(List specs) { Map allFeatures = new HashMap<>(); - - // Initialize historicalFeatures with empty version to guarantee there's a floor entry for every version - NavigableMap> historicalFeatures = new TreeMap<>(Map.of(Version.V_EMPTY, Set.of())); Map nodeFeatures = new HashMap<>(); for (FeatureSpecification spec : specs) { Set specFeatures = spec.getFeatures(); @@ -61,39 +50,6 @@ public static FeatureData createFromSpecifications(List new HashSet<>()).add(hfe.getKey().id()); - } - for (NodeFeature f : specFeatures) { FeatureSpecification existing = allFeatures.putIfAbsent(f.id(), spec); if (existing != null && existing.getClass() != spec.getClass()) { @@ -106,24 +62,7 @@ public static FeatureData createFromSpecifications(List> consolidateHistoricalFeatures( - NavigableMap> declaredHistoricalFeatures - ) { - // update each version by adding in all features from previous versions - Set featureAggregator = new HashSet<>(); - for (Map.Entry> versions : declaredHistoricalFeatures.entrySet()) { - featureAggregator.addAll(versions.getValue()); - versions.setValue(Set.copyOf(featureAggregator)); - } - - return Collections.unmodifiableNavigableMap(declaredHistoricalFeatures); - } - - public NavigableMap> getHistoricalFeatures() { - return historicalFeatures; + return new FeatureData(Map.copyOf(nodeFeatures)); } public Map getNodeFeatures() { diff --git a/server/src/main/java/org/elasticsearch/features/FeatureService.java b/server/src/main/java/org/elasticsearch/features/FeatureService.java index 1d911a75a4838..9a0ac7cafc183 100644 --- 
a/server/src/main/java/org/elasticsearch/features/FeatureService.java +++ b/server/src/main/java/org/elasticsearch/features/FeatureService.java @@ -9,7 +9,6 @@ package org.elasticsearch.features; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.logging.LogManager; @@ -17,8 +16,6 @@ import java.util.List; import java.util.Map; -import java.util.NavigableMap; -import java.util.Set; /** * Manages information on the features supported by nodes in the cluster. @@ -34,9 +31,6 @@ public class FeatureService { private static final Logger logger = LogManager.getLogger(FeatureService.class); - public static final Version CLUSTER_FEATURES_ADDED_VERSION = Version.V_8_12_0; - - private final NavigableMap> historicalFeatures; private final Map nodeFeatures; /** @@ -47,13 +41,12 @@ public FeatureService(List specs) { var featureData = FeatureData.createFromSpecifications(specs); nodeFeatures = featureData.getNodeFeatures(); - historicalFeatures = featureData.getHistoricalFeatures(); logger.info("Registered local node features {}", nodeFeatures.keySet().stream().sorted().toList()); } /** - * The non-historical features supported by this node. + * The features supported by this node. * @return Map of {@code feature-id} to its declaring {@code NodeFeature} object. */ public Map getNodeFeatures() { @@ -65,11 +58,6 @@ public Map getNodeFeatures() { */ @SuppressForbidden(reason = "We need basic feature information from cluster state") public boolean clusterHasFeature(ClusterState state, NodeFeature feature) { - if (state.clusterFeatures().clusterHasFeature(feature)) { - return true; - } - - var features = historicalFeatures.floorEntry(state.getNodes().getMinNodeVersion()); - return features != null && features.getValue().contains(feature.id()); + return state.clusterFeatures().clusterHasFeature(feature); } } diff --git a/server/src/main/java/org/elasticsearch/features/FeatureSpecification.java b/server/src/main/java/org/elasticsearch/features/FeatureSpecification.java index 03f0dd89f172e..c37bc4488f109 100644 --- a/server/src/main/java/org/elasticsearch/features/FeatureSpecification.java +++ b/server/src/main/java/org/elasticsearch/features/FeatureSpecification.java @@ -9,9 +9,6 @@ package org.elasticsearch.features; -import org.elasticsearch.Version; - -import java.util.Map; import java.util.Set; /** @@ -49,12 +46,4 @@ default Set getFeatures() { default Set getTestFeatures() { return Set.of(); } - - /** - * Returns information on historical features that should be deemed to be present on all nodes - * on or above the {@link Version} specified. 
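With `getHistoricalFeatures()` removed, a `FeatureSpecification` only declares current node features, and `clusterHasFeature` consults the cluster feature set alone. A minimal sketch with a hypothetical feature id:

```java
import org.elasticsearch.features.FeatureSpecification;
import org.elasticsearch.features.NodeFeature;

import java.util.Set;

// Hypothetical example spec: only regular node features remain declarable.
public class ExampleFeatures implements FeatureSpecification {
    public static final NodeFeature EXAMPLE_FEATURE = new NodeFeature("example.feature");

    @Override
    public Set<NodeFeature> getFeatures() {
        return Set.of(EXAMPLE_FEATURE);
    }
}

// At a call site holding a FeatureService and the current ClusterState:
// boolean supported = featureService.clusterHasFeature(state, ExampleFeatures.EXAMPLE_FEATURE);
```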
diff --git a/server/src/main/java/org/elasticsearch/features/FeatureSpecification.java b/server/src/main/java/org/elasticsearch/features/FeatureSpecification.java
index 03f0dd89f172e..c37bc4488f109 100644
--- a/server/src/main/java/org/elasticsearch/features/FeatureSpecification.java
+++ b/server/src/main/java/org/elasticsearch/features/FeatureSpecification.java
@@ -9,9 +9,6 @@
 package org.elasticsearch.features;

-import org.elasticsearch.Version;
-
-import java.util.Map;
 import java.util.Set;

 /**
@@ -49,12 +46,4 @@ default Set<NodeFeature> getFeatures() {
     default Set<NodeFeature> getTestFeatures() {
         return Set.of();
     }
-
-    /**
-     * Returns information on historical features that should be deemed to be present on all nodes
-     * on or above the {@link Version} specified.
-     */
-    default Map<NodeFeature, Version> getHistoricalFeatures() {
-        return Map.of();
-    }
 }
diff --git a/server/src/main/java/org/elasticsearch/health/HealthFeatures.java b/server/src/main/java/org/elasticsearch/health/HealthFeatures.java
index 6d106199610d6..091dbc0eae742 100644
--- a/server/src/main/java/org/elasticsearch/health/HealthFeatures.java
+++ b/server/src/main/java/org/elasticsearch/health/HealthFeatures.java
@@ -9,34 +9,17 @@
 package org.elasticsearch.health;

-import org.elasticsearch.Version;
 import org.elasticsearch.features.FeatureSpecification;
 import org.elasticsearch.features.NodeFeature;

-import java.util.Map;
 import java.util.Set;

 public class HealthFeatures implements FeatureSpecification {

-    public static final NodeFeature SUPPORTS_HEALTH = new NodeFeature("health.supports_health");
-    public static final NodeFeature SUPPORTS_HEALTH_REPORT_API = new NodeFeature("health.supports_health_report_api");
-    public static final NodeFeature SUPPORTS_SHARDS_CAPACITY_INDICATOR = new NodeFeature("health.shards_capacity_indicator");
     public static final NodeFeature SUPPORTS_EXTENDED_REPOSITORY_INDICATOR = new NodeFeature("health.extended_repository_indicator");

     @Override
     public Set<NodeFeature> getFeatures() {
         return Set.of(SUPPORTS_EXTENDED_REPOSITORY_INDICATOR);
     }
-
-    @Override
-    public Map<NodeFeature, Version> getHistoricalFeatures() {
-        return Map.of(
-            SUPPORTS_HEALTH,
-            Version.V_8_5_0, // health accessible via /_internal/_health
-            SUPPORTS_HEALTH_REPORT_API,
-            Version.V_8_7_0, // health accessible via /_health_report
-            SUPPORTS_SHARDS_CAPACITY_INDICATOR,
-            Version.V_8_8_0
-        );
-    }
 }
diff --git a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java
index 44fc65fab534f..0d30e157a3a09 100644
--- a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java
+++ b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadataService.java
@@ -28,7 +28,6 @@
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.features.FeatureService;
 import org.elasticsearch.gateway.GatewayService;
-import org.elasticsearch.health.HealthFeatures;

 import java.util.List;
 import java.util.stream.Stream;
@@ -137,7 +136,7 @@ private void updateOnHealthNodeEnabledChange(boolean enabled) {
     private boolean canPostClusterStateUpdates(ClusterState state) {
         // Wait until every node in the cluster supports health checks
-        return isMaster && state.clusterRecovered() && featureService.clusterHasFeature(state, HealthFeatures.SUPPORTS_HEALTH);
+        return isMaster && state.clusterRecovered();
     }

     private void updateOnClusterStateChange(ClusterChangedEvent event) {
diff --git a/server/src/main/java/org/elasticsearch/health/node/DiskHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/health/node/DiskHealthIndicatorService.java
index e38ce7ac92a05..c975e1d1abd91 100644
--- a/server/src/main/java/org/elasticsearch/health/node/DiskHealthIndicatorService.java
+++ b/server/src/main/java/org/elasticsearch/health/node/DiskHealthIndicatorService.java
@@ -20,7 +20,6 @@
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.features.FeatureService;
 import org.elasticsearch.health.Diagnosis;
-import org.elasticsearch.health.HealthFeatures;
 import org.elasticsearch.health.HealthIndicatorDetails;
 import org.elasticsearch.health.HealthIndicatorImpact;
 import org.elasticsearch.health.HealthIndicatorResult;
@@ -91,15 +90,6 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources
ClusterState clusterState = clusterService.state(); Map diskHealthInfoMap = healthInfo.diskInfoByNode(); if (diskHealthInfoMap == null || diskHealthInfoMap.isEmpty()) { - if (featureService.clusterHasFeature(clusterState, HealthFeatures.SUPPORTS_HEALTH) == false) { - return createIndicator( - HealthStatus.GREEN, - "No disk usage data available. The cluster currently has mixed versions (an upgrade may be in progress).", - HealthIndicatorDetails.EMPTY, - List.of(), - List.of() - ); - } /* * If there is no disk health info, that either means that a new health node was just elected, or something is seriously * wrong with health data collection on the health node. Either way, we immediately return UNKNOWN. If there are at least diff --git a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java index a08de9abb4aed..aab9e972cba73 100644 --- a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java +++ b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.core.TimeValue; import org.elasticsearch.features.FeatureService; -import org.elasticsearch.health.HealthFeatures; import org.elasticsearch.health.metadata.HealthMetadata; import org.elasticsearch.health.node.action.HealthNodeNotDiscoveredException; import org.elasticsearch.health.node.selection.HealthNode; @@ -200,7 +199,6 @@ public void clusterChanged(ClusterChangedEvent event) { } } prerequisitesFulfilled = event.state().clusterRecovered() - && featureService.clusterHasFeature(event.state(), HealthFeatures.SUPPORTS_HEALTH) && HealthMetadata.getFromClusterState(event.state()) != null && currentHealthNode != null && currentMasterNode != null; diff --git a/server/src/main/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorService.java index b02bbd95bb9ae..4dd94cfc046c9 100644 --- a/server/src/main/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorService.java @@ -16,7 +16,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.features.FeatureService; import org.elasticsearch.health.Diagnosis; -import org.elasticsearch.health.HealthFeatures; import org.elasticsearch.health.HealthIndicatorDetails; import org.elasticsearch.health.HealthIndicatorImpact; import org.elasticsearch.health.HealthIndicatorResult; @@ -111,15 +110,6 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources var state = clusterService.state(); var healthMetadata = HealthMetadata.getFromClusterState(state); if (healthMetadata == null || healthMetadata.getShardLimitsMetadata() == null) { - if (featureService.clusterHasFeature(state, HealthFeatures.SUPPORTS_SHARDS_CAPACITY_INDICATOR) == false) { - return createIndicator( - HealthStatus.GREEN, - "No shard limits configured yet. 
The cluster currently has mixed versions (an upgrade may be in progress).", - HealthIndicatorDetails.EMPTY, - List.of(), - List.of() - ); - } return unknownIndicator(); } diff --git a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java index 3357936e5f10c..5991bc248ba76 100644 --- a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.features.FeatureService; -import org.elasticsearch.health.HealthFeatures; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskParams; @@ -157,11 +156,8 @@ public PersistentTasksCustomMetadata.Assignment getAssignment( // visible for testing void startTask(ClusterChangedEvent event) { - // Wait until every node in the cluster supports health checks - if (event.localNodeMaster() - && event.state().clusterRecovered() - && HealthNode.findTask(event.state()) == null - && featureService.clusterHasFeature(event.state(), HealthFeatures.SUPPORTS_HEALTH)) { + // Wait until master is stable before starting health task + if (event.localNodeMaster() && event.state().clusterRecovered() && HealthNode.findTask(event.state()) == null) { persistentTasksService.sendStartRequest( TASK_NAME, TASK_NAME, @@ -186,8 +182,8 @@ void startTask(ClusterChangedEvent event) { // visible for testing void shuttingDown(ClusterChangedEvent event) { - DiscoveryNode node = clusterService.localNode(); - if (isNodeShuttingDown(event, node.getId())) { + if (isNodeShuttingDown(event)) { + var node = event.state().getNodes().getLocalNode(); abortTaskIfApplicable("node [{" + node.getName() + "}{" + node.getId() + "}] shutting down"); } } @@ -202,9 +198,18 @@ void abortTaskIfApplicable(String reason) { } } - private static boolean isNodeShuttingDown(ClusterChangedEvent event, String nodeId) { - return event.previousState().metadata().nodeShutdowns().contains(nodeId) == false - && event.state().metadata().nodeShutdowns().contains(nodeId); + private static boolean isNodeShuttingDown(ClusterChangedEvent event) { + if (event.metadataChanged() == false) { + return false; + } + var shutdownsOld = event.previousState().metadata().nodeShutdowns(); + var shutdownsNew = event.state().metadata().nodeShutdowns(); + if (shutdownsNew == shutdownsOld) { + return false; + } + String nodeId = event.state().nodes().getLocalNodeId(); + return shutdownsOld.contains(nodeId) == false && shutdownsNew.contains(nodeId); + } public static List getNamedXContentParsers() { diff --git a/server/src/main/java/org/elasticsearch/http/HttpBody.java b/server/src/main/java/org/elasticsearch/http/HttpBody.java index a10487502ed3c..6571125677fab 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpBody.java +++ b/server/src/main/java/org/elasticsearch/http/HttpBody.java @@ -9,7 +9,6 @@ package org.elasticsearch.http; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.core.Nullable; @@ -21,11 +20,11 @@ public sealed interface HttpBody extends Releasable permits HttpBody.Full, 
HttpBody.Stream { static Full fromBytesReference(BytesReference bytesRef) { - return new ByteRefHttpBody(bytesRef); + return new ByteRefHttpBody(ReleasableBytesReference.wrap(bytesRef)); } static Full empty() { - return new ByteRefHttpBody(BytesArray.EMPTY); + return new ByteRefHttpBody(ReleasableBytesReference.empty()); } default boolean isFull() { @@ -56,7 +55,7 @@ default Stream asStream() { * Full content represents a complete http body content that can be accessed immediately. */ non-sealed interface Full extends HttpBody { - BytesReference bytes(); + ReleasableBytesReference bytes(); @Override default void close() {} @@ -114,5 +113,5 @@ interface ChunkHandler extends Releasable { default void close() {} } - record ByteRefHttpBody(BytesReference bytes) implements Full {} + record ByteRefHttpBody(ReleasableBytesReference bytes) implements Full {} } diff --git a/server/src/main/java/org/elasticsearch/http/HttpRequest.java b/server/src/main/java/org/elasticsearch/http/HttpRequest.java index ca6e51f2cec08..b4b1bb84433c9 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpRequest.java +++ b/server/src/main/java/org/elasticsearch/http/HttpRequest.java @@ -52,10 +52,4 @@ enum HttpVersion { */ void release(); - /** - * If this instances uses any pooled resources, creates a copy of this instance that does not use any pooled resources and releases - * any resources associated with this instance. If the instance does not use any shared resources, returns itself. - * @return a safe unpooled http request - */ - HttpRequest releaseAndCopy(); } diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java b/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java index 6a553d5dc5440..8c997a9766baa 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java @@ -11,6 +11,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedFunction; @@ -54,7 +55,7 @@ Settings getAdditionalIndexSettings( /** * Infrastructure class that holds services that can be used by {@link IndexSettingProvider} instances. 
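Before the diff moves on to IndexSettingProvider, one practical consequence of the HttpBody hunks just above: Full#bytes() now returns ReleasableBytesReference instead of BytesReference, so callers uniformly see a ref-counted buffer. A minimal sketch of the new construction path, assuming only the factory methods visible in the diff (ReleasableBytesReference is Releasable, hence Closeable, so try-with-resources works; for wrapped, unpooled content the close is effectively a no-op):

    static int bodyLength() {
        BytesReference raw = new BytesArray("{\"ok\":true}".getBytes(StandardCharsets.UTF_8));
        HttpBody.Full body = HttpBody.fromBytesReference(raw); // wraps into a ReleasableBytesReference
        try (ReleasableBytesReference bytes = body.bytes()) {
            return bytes.length(); // safe to read while the reference is held
        }
    }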
*/ - record Parameters(CheckedFunction mapperServiceFactory) { + record Parameters(ClusterService clusterService, CheckedFunction mapperServiceFactory) { } diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index 9264b9e1c3a20..6344aa2a72ca9 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -130,9 +130,12 @@ private static Version parseUnchecked(String version) { public static final IndexVersion ENABLE_IGNORE_ABOVE_LOGSDB = def(8_517_00_0, Version.LUCENE_9_12_0); public static final IndexVersion ADD_ROLE_MAPPING_CLEANUP_MIGRATION = def(8_518_00_0, Version.LUCENE_9_12_0); public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT_BACKPORT = def(8_519_00_0, Version.LUCENE_9_12_0); + public static final IndexVersion TIME_BASED_K_ORDERED_DOC_ID_BACKPORT = def(8_520_00_0, Version.LUCENE_9_12_0); + public static final IndexVersion V8_DEPRECATE_SOURCE_MODE_MAPPER = def(8_521_00_0, Version.LUCENE_9_12_0); public static final IndexVersion UPGRADE_TO_LUCENE_10_0_0 = def(9_000_00_0, Version.LUCENE_10_0_0); public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT = def(9_001_00_0, Version.LUCENE_10_0_0); public static final IndexVersion TIME_BASED_K_ORDERED_DOC_ID = def(9_002_00_0, Version.LUCENE_10_0_0); + public static final IndexVersion DEPRECATE_SOURCE_MODE_MAPPER = def(9_003_00_0, Version.LUCENE_10_0_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/index/IndexingPressure.java b/server/src/main/java/org/elasticsearch/index/IndexingPressure.java index f80e8a89f5cf2..43ae38fea6018 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexingPressure.java +++ b/server/src/main/java/org/elasticsearch/index/IndexingPressure.java @@ -105,6 +105,9 @@ public class IndexingPressure { private final AtomicLong replicaRejections = new AtomicLong(0); private final AtomicLong primaryDocumentRejections = new AtomicLong(0); + private final AtomicLong lowWaterMarkSplits = new AtomicLong(0); + private final AtomicLong highWaterMarkSplits = new AtomicLong(0); + private final long lowWatermark; private final long lowWatermarkSize; private final long highWatermark; @@ -265,11 +268,20 @@ public Releasable markReplicaOperationStarted(int operations, long bytes, boolea public boolean shouldSplitBulk(long size) { long currentUsage = (currentCombinedCoordinatingAndPrimaryBytes.get() + currentReplicaBytes.get()); - return (currentUsage >= lowWatermark && size >= lowWatermarkSize) || (currentUsage >= highWatermark && size >= highWatermarkSize); + if (currentUsage >= highWatermark && size >= highWatermarkSize) { + highWaterMarkSplits.getAndIncrement(); + logger.trace(() -> Strings.format("Split bulk due to high watermark: current bytes [%d] and size [%d]", currentUsage, size)); + return (true); + } + if (currentUsage >= lowWatermark && size >= lowWatermarkSize) { + lowWaterMarkSplits.getAndIncrement(); + logger.trace(() -> Strings.format("Split bulk due to low watermark: current bytes [%d] and size [%d]", currentUsage, size)); + return (true); + } + return (false); } public IndexingPressureStats stats() { - // TODO: Update stats with new primary/replica/coordinating limits and add throttling stats return new IndexingPressureStats( 
totalCombinedCoordinatingAndPrimaryBytes.get(), totalCoordinatingBytes.get(), @@ -290,7 +302,9 @@ public IndexingPressureStats stats() { currentPrimaryOps.get(), currentReplicaOps.get(), primaryDocumentRejections.get(), - totalCoordinatingRequests.get() + totalCoordinatingRequests.get(), + lowWaterMarkSplits.get(), + highWaterMarkSplits.get() ); } } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index 2c1175648c219..91c4b780db0bd 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -189,7 +189,11 @@ public Analyzer getAnalyzer(String analyzer) throws IOException { } }); } - return analyzerProvider.get(environment, analyzer).get(); + + return overridePositionIncrementGap( + (NamedAnalyzer) analyzerProvider.get(environment, analyzer).get(), + TextFieldMapper.Defaults.POSITION_INCREMENT_GAP + ); } @Override @@ -720,13 +724,8 @@ private static NamedAnalyzer produceAnalyzer( throw new IllegalArgumentException("analyzer [" + analyzerFactory.name() + "] created null analyzer"); } NamedAnalyzer analyzer; - if (analyzerF instanceof NamedAnalyzer) { - // if we got a named analyzer back, use it... - analyzer = (NamedAnalyzer) analyzerF; - if (overridePositionIncrementGap >= 0 && analyzer.getPositionIncrementGap(analyzer.name()) != overridePositionIncrementGap) { - // unless the positionIncrementGap needs to be overridden - analyzer = new NamedAnalyzer(analyzer, overridePositionIncrementGap); - } + if (analyzerF instanceof NamedAnalyzer namedAnalyzer) { + analyzer = overridePositionIncrementGap(namedAnalyzer, overridePositionIncrementGap); } else { analyzer = new NamedAnalyzer(name, analyzerFactory.scope(), analyzerF, overridePositionIncrementGap); } @@ -734,6 +733,13 @@ private static NamedAnalyzer produceAnalyzer( return analyzer; } + private static NamedAnalyzer overridePositionIncrementGap(NamedAnalyzer analyzer, int overridePositionIncrementGap) { + if (overridePositionIncrementGap >= 0 && analyzer.getPositionIncrementGap(analyzer.name()) != overridePositionIncrementGap) { + analyzer = new NamedAnalyzer(analyzer, overridePositionIncrementGap); + } + return analyzer; + } + private static void processNormalizerFactory( String name, AnalyzerProvider normalizerFactory, diff --git a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index 5277999271984..33a8487bb33a3 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -58,8 +58,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; -import static org.elasticsearch.index.IndexSettings.INDEX_FAST_REFRESH_SETTING; - /** * This is a cache for {@link BitDocIdSet} based filters and is unbounded by size or time. *

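Looking back at the IndexingPressure#shouldSplitBulk hunk above: the predicate itself is unchanged; the rewrite exists so each branch can bump its own counter (highWaterMarkSplits, lowWaterMarkSplits) and emit a trace line before returning. A condensed, self-contained restatement of the rule, with the fields inlined as parameters for clarity:

    // Split a bulk request when total indexing-pressure usage has crossed a
    // watermark AND the incoming request is itself large enough to matter.
    static boolean shouldSplit(long currentUsage, long size,
                               long lowWatermark, long lowWatermarkSize,
                               long highWatermark, long highWatermarkSize) {
        if (currentUsage >= highWatermark && size >= highWatermarkSize) {
            return true; // attributed to the high-watermark counter
        }
        if (currentUsage >= lowWatermark && size >= lowWatermarkSize) {
            return true; // attributed to the low-watermark counter
        }
        return false;    // small requests pass through even under pressure
    }

The high-watermark branch is checked first, so a request that crosses both thresholds is counted once, against the high watermark, keeping the two new stats disjoint.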
@@ -105,10 +103,7 @@ static boolean shouldLoadRandomAccessFiltersEagerly(IndexSettings settings) { boolean loadFiltersEagerlySetting = settings.getValue(INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING); boolean isStateless = DiscoveryNode.isStateless(settings.getNodeSettings()); if (isStateless) { - return loadFiltersEagerlySetting - && (DiscoveryNode.hasRole(settings.getNodeSettings(), DiscoveryNodeRole.SEARCH_ROLE) - || (DiscoveryNode.hasRole(settings.getNodeSettings(), DiscoveryNodeRole.INDEX_ROLE) - && INDEX_FAST_REFRESH_SETTING.get(settings.getSettings()))); + return loadFiltersEagerlySetting && DiscoveryNode.hasRole(settings.getNodeSettings(), DiscoveryNodeRole.SEARCH_ROLE); } else { return loadFiltersEagerlySetting; } @@ -341,5 +336,13 @@ public interface Listener { * @param accountable the bitsets ram representation */ void onRemoval(ShardId shardId, Accountable accountable); + + Listener NOOP = new Listener() { + @Override + public void onCache(ShardId shardId, Accountable accountable) {} + + @Override + public void onRemoval(ShardId shardId, Accountable accountable) {} + }; } } diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java index 9c2a08a69002c..4d3d37ab4f3af 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java +++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldFormatSupplier.java @@ -20,10 +20,15 @@ import org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat; import org.elasticsearch.index.codec.postings.ES812PostingsFormat; import org.elasticsearch.index.codec.tsdb.ES87TSDBDocValuesFormat; +import org.elasticsearch.index.mapper.CompletionFieldMapper; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; +import org.elasticsearch.internal.CompletionsPostingsFormatExtension; +import org.elasticsearch.plugins.ExtensionLoader; + +import java.util.ServiceLoader; /** * Class that encapsulates the logic of figuring out the most appropriate file format for a given field, across postings, doc values and @@ -53,15 +58,28 @@ public PostingsFormat getPostingsFormatForField(String field) { private PostingsFormat internalGetPostingsFormatForField(String field) { if (mapperService != null) { - final PostingsFormat format = mapperService.mappingLookup().getPostingsFormat(field); - if (format != null) { - return format; + Mapper mapper = mapperService.mappingLookup().getMapper(field); + if (mapper instanceof CompletionFieldMapper) { + return PostingsFormatHolder.POSTINGS_FORMAT; } } // return our own posting format using PFOR return es812PostingsFormat; } + private static class PostingsFormatHolder { + private static final PostingsFormat POSTINGS_FORMAT = getPostingsFormat(); + + private static PostingsFormat getPostingsFormat() { + String defaultName = "Completion912"; // Caution: changing this name will result in exceptions if a field is created during a + // rolling upgrade and the new codec (specified by the name) is not available on all nodes in the cluster. 
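The PostingsFormatHolder whose body continues just below is the initialization-on-demand holder idiom: the ServiceLoader scan runs once, when the nested class is first touched, and JVM class-initialization locking makes that thread-safe with no explicit synchronization. A generic sketch of the idiom under hypothetical names (Format and the string literals are stand-ins, not Elasticsearch API):

    interface Format { String name(); }

    final class FormatHolder {
        // Initialized lazily and at most once, on first access to FormatHolder.FORMAT.
        static final Format FORMAT = load();

        private static Format load() {
            return java.util.ServiceLoader.load(Format.class)
                .findFirst()              // an installed extension wins...
                .orElse(() -> "default"); // ...otherwise fall back to a built-in
        }
    }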
+ String codecName = ExtensionLoader.loadSingleton(ServiceLoader.load(CompletionsPostingsFormatExtension.class)) + .map(CompletionsPostingsFormatExtension::getFormatName) + .orElse(defaultName); + return PostingsFormat.forName(codecName); + } + } + boolean useBloomFilter(String field) { if (mapperService == null) { return false; diff --git a/server/src/main/java/org/elasticsearch/index/codec/vectors/BQSpaceUtils.java b/server/src/main/java/org/elasticsearch/index/codec/vectors/BQSpaceUtils.java index 68363b5926a6b..f9fad74835683 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/vectors/BQSpaceUtils.java +++ b/server/src/main/java/org/elasticsearch/index/codec/vectors/BQSpaceUtils.java @@ -23,56 +23,38 @@ public class BQSpaceUtils { public static final short B_QUERY = 4; - // the first four bits masked - private static final int B_QUERY_MASK = 15; /** * Copied from Lucene, replace with Lucene's implementation sometime after Lucene 10 + * Transpose the query vector into a byte array allowing for efficient bitwise operations with the + * index bit vectors. The idea here is to organize the query vector bits such that the first bit + * of every dimension is in the first set dimensions bits, or (dimensions/8) bytes. The second, + * third, and fourth bits are in the second, third, and fourth set of dimensions bits, + * respectively. This allows for direct bitwise comparisons with the stored index vectors through + * summing the bitwise results with the relative required bit shifts. + * * @param q the query vector, assumed to be half-byte quantized with values between 0 and 15 - * @param dimensions the number of dimensions in the query vector * @param quantQueryByte the byte array to store the transposed query vector */ - public static void transposeBin(byte[] q, int dimensions, byte[] quantQueryByte) { - // TODO: rewrite this in Panama Vector API - int qOffset = 0; - final byte[] v1 = new byte[4]; - final byte[] v = new byte[32]; - for (int i = 0; i < dimensions; i += 32) { - // for every four bytes we shift left (with remainder across those bytes) - for (int j = 0; j < v.length; j += 4) { - v[j] = (byte) (q[qOffset + j] << B_QUERY | ((q[qOffset + j] >>> B_QUERY) & B_QUERY_MASK)); - v[j + 1] = (byte) (q[qOffset + j + 1] << B_QUERY | ((q[qOffset + j + 1] >>> B_QUERY) & B_QUERY_MASK)); - v[j + 2] = (byte) (q[qOffset + j + 2] << B_QUERY | ((q[qOffset + j + 2] >>> B_QUERY) & B_QUERY_MASK)); - v[j + 3] = (byte) (q[qOffset + j + 3] << B_QUERY | ((q[qOffset + j + 3] >>> B_QUERY) & B_QUERY_MASK)); - } - for (int j = 0; j < B_QUERY; j++) { - moveMaskEpi8Byte(v, v1); - for (int k = 0; k < 4; k++) { - quantQueryByte[(B_QUERY - j - 1) * (dimensions / 8) + i / 8 + k] = v1[k]; - v1[k] = 0; - } - for (int k = 0; k < v.length; k += 4) { - v[k] = (byte) (v[k] + v[k]); - v[k + 1] = (byte) (v[k + 1] + v[k + 1]); - v[k + 2] = (byte) (v[k + 2] + v[k + 2]); - v[k + 3] = (byte) (v[k + 3] + v[k + 3]); - } - } - qOffset += 32; - } - } - - private static void moveMaskEpi8Byte(byte[] v, byte[] v1b) { - int m = 0; - for (int k = 0; k < v.length; k++) { - if ((v[k] & 0b10000000) == 0b10000000) { - v1b[m] |= 0b00000001; - } - if (k % 8 == 7) { - m++; - } else { - v1b[m] <<= 1; + public static void transposeHalfByte(byte[] q, byte[] quantQueryByte) { + for (int i = 0; i < q.length;) { + assert q[i] >= 0 && q[i] <= 15; + int lowerByte = 0; + int lowerMiddleByte = 0; + int upperMiddleByte = 0; + int upperByte = 0; + for (int j = 7; j >= 0 && i < q.length; j--) { + lowerByte |= (q[i] & 1) << j; + lowerMiddleByte |= 
((q[i] >> 1) & 1) << j; + upperMiddleByte |= ((q[i] >> 2) & 1) << j; + upperByte |= ((q[i] >> 3) & 1) << j; + i++; } + int index = ((i + 7) / 8) - 1; + quantQueryByte[index] = (byte) lowerByte; + quantQueryByte[index + quantQueryByte.length / 4] = (byte) lowerMiddleByte; + quantQueryByte[index + quantQueryByte.length / 2] = (byte) upperMiddleByte; + quantQueryByte[index + 3 * quantQueryByte.length / 4] = (byte) upperByte; } } } diff --git a/server/src/main/java/org/elasticsearch/index/codec/vectors/BinaryQuantizer.java b/server/src/main/java/org/elasticsearch/index/codec/vectors/BinaryQuantizer.java index 192fb9092ac3a..aa72904fe1341 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/vectors/BinaryQuantizer.java +++ b/server/src/main/java/org/elasticsearch/index/codec/vectors/BinaryQuantizer.java @@ -223,9 +223,7 @@ public QueryAndIndexResults quantizeQueryAndIndex(float[] vector, byte[] indexDe // q¯ = Δ · q¯𝑢 + 𝑣𝑙 · 1𝐷 // q¯ is an approximation of q′ (scalar quantized approximation) - // FIXME: vectors need to be padded but that's expensive; update transponseBin to deal - byteQuery = BQVectorUtils.pad(byteQuery, discretizedDimensions); - BQSpaceUtils.transposeBin(byteQuery, discretizedDimensions, queryDestination); + BQSpaceUtils.transposeHalfByte(byteQuery, queryDestination); QueryFactors factors = new QueryFactors(quantResult.quantizedSum, distToC, lower, width, normVmC, vDotC); final float[] indexCorrections; if (similarityFunction == EUCLIDEAN) { @@ -366,9 +364,7 @@ public QueryFactors quantizeForQuery(float[] vector, byte[] destination, float[] // q¯ = Δ · q¯𝑢 + 𝑣𝑙 · 1𝐷 // q¯ is an approximation of q′ (scalar quantized approximation) - // FIXME: vectors need to be padded but that's expensive; update transponseBin to deal - byteQuery = BQVectorUtils.pad(byteQuery, discretizedDimensions); - BQSpaceUtils.transposeBin(byteQuery, discretizedDimensions, destination); + BQSpaceUtils.transposeHalfByte(byteQuery, destination); QueryFactors factors; if (similarityFunction != EUCLIDEAN) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java index 53ccccdbd4bab..bb229c795a83e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java @@ -8,7 +8,6 @@ */ package org.elasticsearch.index.mapper; -import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; @@ -344,10 +343,6 @@ public CompletionFieldType fieldType() { return (CompletionFieldType) super.fieldType(); } - static PostingsFormat postingsFormat() { - return PostingsFormat.forName("Completion912"); - } - @Override public boolean parsesArrayValue() { return true; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 10484a1c26098..1c9321737ab5f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -9,7 +9,9 @@ package org.elasticsearch.index.mapper; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.features.NodeFeature; import 
org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSortConfig; @@ -25,6 +27,7 @@ public class DocumentMapper { private final DocumentParser documentParser; private final MapperMetrics mapperMetrics; private final IndexVersion indexVersion; + private final Logger logger; static final NodeFeature INDEX_SORTING_ON_NESTED = new NodeFeature("mapper.index_sorting_on_nested"); @@ -44,7 +47,8 @@ public static DocumentMapper createEmpty(MapperService mapperService) { mapping, mapping.toCompressedXContent(), IndexVersion.current(), - mapperService.getMapperMetrics() + mapperService.getMapperMetrics(), + mapperService.index().getName() ); } @@ -53,7 +57,8 @@ public static DocumentMapper createEmpty(MapperService mapperService) { Mapping mapping, CompressedXContent source, IndexVersion version, - MapperMetrics mapperMetrics + MapperMetrics mapperMetrics, + String indexName ) { this.documentParser = documentParser; this.type = mapping.getRoot().fullPath(); @@ -61,11 +66,18 @@ public static DocumentMapper createEmpty(MapperService mapperService) { this.mappingSource = source; this.mapperMetrics = mapperMetrics; this.indexVersion = version; + this.logger = Loggers.getLogger(getClass(), indexName); assert mapping.toCompressedXContent().equals(source) || isSyntheticSourceMalformed(source, version) : "provided source [" + source + "] differs from mapping [" + mapping.toCompressedXContent() + "]"; } + private void maybeLogDebug(Exception ex) { + if (logger.isDebugEnabled()) { + logger.debug("Error while parsing document: " + ex.getMessage(), ex); + } + } + /** * Indexes built at v.8.7 were missing an explicit entry for synthetic_source. * This got restored in v.8.10 to avoid confusion. The change is only restricted to mapping printout, it has no @@ -110,7 +122,12 @@ public MappingLookup mappers() { } public ParsedDocument parse(SourceToParse source) throws DocumentParsingException { - return documentParser.parseDocument(source, mappingLookup); + try { + return documentParser.parseDocument(source, mappingLookup); + } catch (Exception e) { + maybeLogDebug(e); + throw e; + } } public void validate(IndexSettings settings, boolean checkLimits) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java index 5743baeec536d..333c37381c587 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java @@ -57,6 +57,10 @@ public Set getFeatures() { ); } + public static final NodeFeature CONSTANT_KEYWORD_SYNTHETIC_SOURCE_WRITE_FIX = new NodeFeature( + "mapper.constant_keyword.synthetic_source_write_fix" + ); + @Override public Set getTestFeatures() { return Set.of( @@ -66,7 +70,8 @@ public Set getTestFeatures() { SourceFieldMapper.SOURCE_MODE_FROM_INDEX_SETTING, IgnoredSourceFieldMapper.IGNORED_SOURCE_AS_TOP_LEVEL_METADATA_ARRAY_FIELD, IgnoredSourceFieldMapper.ALWAYS_STORE_OBJECT_ARRAYS_IN_NESTED_OBJECTS, - MapperService.LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT + MapperService.LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT, + CONSTANT_KEYWORD_SYNTHETIC_SOURCE_WRITE_FIX ); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 7f952153c6453..1673b1719d8bf 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ 
b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -585,7 +585,14 @@ private DocumentMapper doMerge(String type, MergeReason reason, Map indexAnalyzersMap; private final List indexTimeScriptMappers; private final Mapping mapping; - private final Set completionFields; private final int totalFieldsCount; /** @@ -161,7 +158,6 @@ private MappingLookup( this.nestedLookup = NestedLookup.build(nestedMappers); final Map indexAnalyzersMap = new HashMap<>(); - final Set completionFields = new HashSet<>(); final List indexTimeScriptMappers = new ArrayList<>(); for (FieldMapper mapper : mappers) { if (objects.containsKey(mapper.fullPath())) { @@ -174,9 +170,6 @@ private MappingLookup( if (mapper.hasScript()) { indexTimeScriptMappers.add(mapper); } - if (mapper instanceof CompletionFieldMapper) { - completionFields.add(mapper.fullPath()); - } } for (FieldAliasMapper aliasMapper : aliasMappers) { @@ -211,7 +204,6 @@ private MappingLookup( this.objectMappers = Map.copyOf(objects); this.runtimeFieldMappersCount = runtimeFields.size(); this.indexAnalyzersMap = Map.copyOf(indexAnalyzersMap); - this.completionFields = Set.copyOf(completionFields); this.indexTimeScriptMappers = List.copyOf(indexTimeScriptMappers); runtimeFields.stream().flatMap(RuntimeField::asMappedFieldTypes).map(MappedFieldType::name).forEach(this::validateDoesNotShadow); @@ -285,15 +277,6 @@ public Iterable fieldMappers() { return fieldMappers.values(); } - /** - * Gets the postings format for a particular field - * @param field the field to retrieve a postings format for - * @return the postings format for the field, or {@code null} if the default format should be used - */ - public PostingsFormat getPostingsFormat(String field) { - return completionFields.contains(field) ? 
CompletionFieldMapper.postingsFormat() : null; - } - void checkLimits(IndexSettings settings) { checkFieldLimit(settings.getMappingTotalFieldsLimit()); checkObjectDepthLimit(settings.getMappingDepthLimit()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java index f30a0089e4eff..2ca14473c8385 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java @@ -10,6 +10,8 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.mapper.MapperService.MergeReason; @@ -31,6 +33,7 @@ public final class MappingParser { private final Supplier, MetadataFieldMapper>> metadataMappersSupplier; private final Map metadataMapperParsers; private final Function documentTypeResolver; + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(MappingParser.class); MappingParser( Supplier mappingParserContextSupplier, @@ -144,6 +147,12 @@ Mapping parse(@Nullable String type, MergeReason reason, Map map } @SuppressWarnings("unchecked") Map fieldNodeMap = (Map) fieldNode; + if (reason == MergeReason.INDEX_TEMPLATE + && SourceFieldMapper.NAME.equals(fieldName) + && fieldNodeMap.containsKey("mode") + && SourceFieldMapper.onOrAfterDeprecateModeVersion(mappingParserContext.indexVersionCreated())) { + deprecationLogger.critical(DeprecationCategory.MAPPINGS, "mapping_source_mode", SourceFieldMapper.DEPRECATION_WARNING); + } MetadataFieldMapper metadataFieldMapper = typeParser.parse(fieldName, fieldNodeMap, mappingParserContext).build(); metadataMappers.put(metadataFieldMapper.getClass(), metadataFieldMapper); assert fieldNodeMap.isEmpty(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java index 9a2c9517dfd05..31aa787c3f758 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java @@ -10,16 +10,13 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.common.Explicit; -import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Iterator; import java.util.Map; -import java.util.Set; import java.util.function.Function; /** @@ -135,8 +132,6 @@ public final MetadataFieldMapper build(MapperBuilderContext context) { return build(); } - private static final Set UNSUPPORTED_PARAMETERS_8_6_0 = Set.of("type", "fields", "copy_to", "boost"); - public final void parseMetadataField(String name, MappingParserContext parserContext, Map fieldNode) { final Parameter[] params = getParameters(); Map> paramsMap = Maps.newHashMapWithExpectedSize(params.length); @@ -149,20 +144,6 @@ public final void parseMetadataField(String name, MappingParserContext parserCon final Object propNode = entry.getValue(); Parameter parameter = 
paramsMap.get(propName); if (parameter == null) { - if (UNSUPPORTED_PARAMETERS_8_6_0.contains(propName)) { - if (parserContext.indexVersionCreated().onOrAfter(IndexVersions.V_8_6_0)) { - // silently ignore type, and a few other parameters: sadly we've been doing this for a long time - deprecationLogger.warn( - DeprecationCategory.API, - propName, - "Parameter [{}] has no effect on metadata field [{}] and will be removed in future", - propName, - name - ); - } - iterator.remove(); - continue; - } throw new MapperParsingException("unknown parameter [" + propName + "] on metadata field [" + name + "]"); } parameter.parse(name, parserContext, propNode); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index dd25cd6eb80a3..b97e04fcddb5d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -25,6 +25,7 @@ import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.SearchExecutionContext; @@ -68,6 +69,9 @@ public class SourceFieldMapper extends MetadataFieldMapper { return indexMode.defaultSourceMode().name(); }, "index.mapping.source.mode", value -> {}, Setting.Property.Final, Setting.Property.IndexScope); + public static final String DEPRECATION_WARNING = "Configuring source mode in mappings is deprecated and will be removed " + + "in future versions. Use [index.mapping.source.mode] index setting instead."; + /** The source mode */ public enum Mode { DISABLED, @@ -79,28 +83,32 @@ public enum Mode { null, Explicit.IMPLICIT_TRUE, Strings.EMPTY_ARRAY, - Strings.EMPTY_ARRAY + Strings.EMPTY_ARRAY, + false ); private static final SourceFieldMapper STORED = new SourceFieldMapper( Mode.STORED, Explicit.IMPLICIT_TRUE, Strings.EMPTY_ARRAY, - Strings.EMPTY_ARRAY + Strings.EMPTY_ARRAY, + false ); private static final SourceFieldMapper SYNTHETIC = new SourceFieldMapper( Mode.SYNTHETIC, Explicit.IMPLICIT_TRUE, Strings.EMPTY_ARRAY, - Strings.EMPTY_ARRAY + Strings.EMPTY_ARRAY, + false ); private static final SourceFieldMapper DISABLED = new SourceFieldMapper( Mode.DISABLED, Explicit.IMPLICIT_TRUE, Strings.EMPTY_ARRAY, - Strings.EMPTY_ARRAY + Strings.EMPTY_ARRAY, + false ); public static class Defaults { @@ -134,16 +142,7 @@ public static class Builder extends MetadataFieldMapper.Builder { * The default mode for TimeSeries is left empty on purpose, so that mapping printings include the synthetic * source mode. */ - private final Parameter mode = new Parameter<>( - "mode", - true, - () -> null, - (n, c, o) -> Mode.valueOf(o.toString().toUpperCase(Locale.ROOT)), - m -> toType(m).enabled.explicit() ? 
null : toType(m).mode, - (b, n, v) -> b.field(n, v.toString().toLowerCase(Locale.ROOT)), - v -> v.toString().toLowerCase(Locale.ROOT) - ).setMergeValidator((previous, current, conflicts) -> (previous == current) || current != Mode.STORED) - .setSerializerCheck((includeDefaults, isConfigured, value) -> value != null); // don't emit if `enabled` is configured + private final Parameter mode; private final Parameter> includes = Parameter.stringArrayParam( "includes", false, @@ -158,15 +157,28 @@ public static class Builder extends MetadataFieldMapper.Builder { private final Settings settings; private final IndexMode indexMode; + private boolean serializeMode; private final boolean supportsNonDefaultParameterValues; - public Builder(IndexMode indexMode, final Settings settings, boolean supportsCheckForNonDefaultParams) { + public Builder(IndexMode indexMode, final Settings settings, boolean supportsCheckForNonDefaultParams, boolean serializeMode) { super(Defaults.NAME); this.settings = settings; this.indexMode = indexMode; this.supportsNonDefaultParameterValues = supportsCheckForNonDefaultParams == false || settings.getAsBoolean(LOSSY_PARAMETERS_ALLOWED_SETTING_NAME, true); + this.serializeMode = serializeMode; + this.mode = new Parameter<>( + "mode", + true, + () -> null, + (n, c, o) -> Mode.valueOf(o.toString().toUpperCase(Locale.ROOT)), + m -> toType(m).enabled.explicit() ? null : toType(m).mode, + (b, n, v) -> b.field(n, v.toString().toLowerCase(Locale.ROOT)), + v -> v.toString().toLowerCase(Locale.ROOT) + ).setMergeValidator((previous, current, conflicts) -> (previous == current) || current != Mode.STORED) + // don't emit if `enabled` is configured + .setSerializerCheck((includeDefaults, isConfigured, value) -> serializeMode && value != null); } public Builder setSynthetic() { @@ -219,21 +231,22 @@ public SourceFieldMapper build() { if (sourceMode == Mode.SYNTHETIC && (includes.getValue().isEmpty() == false || excludes.getValue().isEmpty() == false)) { throw new IllegalArgumentException("filtering the stored _source is incompatible with synthetic source"); } - - SourceFieldMapper sourceFieldMapper; - if (isDefault()) { + if (mode.isConfigured()) { + serializeMode = true; + } + final SourceFieldMapper sourceFieldMapper; + if (isDefault() && sourceMode == null) { // Needed for bwc so that "mode" is not serialized in case of a standard index with stored source. 
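The serializeMode handling above is gated on onOrAfterDeprecateModeVersion, added near the end of this file's changes. Its shape is the standard backport gate: a 9.x cutover constant OR an 8.x window, needed because indices created on 8.x nodes after the backport carry index versions below the 9.0 cutover yet must already show the new behavior. A hedged illustration using the constants registered in IndexVersions earlier in this diff (between is read here as lower-inclusive, upper-exclusive; the body mirrors the diff exactly):

    // True when the index was created at/after the 9.x change
    // (DEPRECATE_SOURCE_MODE_MAPPER = 9_003_00_0), or inside the 8.x backport
    // window [V8_DEPRECATE_SOURCE_MODE_MAPPER = 8_521_00_0,
    // UPGRADE_TO_LUCENE_10_0_0 = 9_000_00_0).
    static boolean deprecateModeApplies(IndexVersion version) {
        return version.onOrAfter(IndexVersions.DEPRECATE_SOURCE_MODE_MAPPER)
            || version.between(IndexVersions.V8_DEPRECATE_SOURCE_MODE_MAPPER, IndexVersions.UPGRADE_TO_LUCENE_10_0_0);
    }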
- if (sourceMode == null) { - sourceFieldMapper = DEFAULT; - } else { - sourceFieldMapper = resolveStaticInstance(sourceMode); - } + sourceFieldMapper = DEFAULT; + } else if (isDefault() && serializeMode == false && sourceMode != null) { + sourceFieldMapper = resolveStaticInstance(sourceMode); } else { sourceFieldMapper = new SourceFieldMapper( sourceMode, enabled.get(), includes.getValue().toArray(Strings.EMPTY_ARRAY), - excludes.getValue().toArray(Strings.EMPTY_ARRAY) + excludes.getValue().toArray(Strings.EMPTY_ARRAY), + serializeMode ); } if (indexMode != null) { @@ -283,13 +296,17 @@ private static SourceFieldMapper resolveStaticInstance(final Mode sourceMode) { if (indexMode == IndexMode.STANDARD && settingSourceMode == Mode.STORED) { return DEFAULT; } - - return resolveStaticInstance(settingSourceMode); + if (onOrAfterDeprecateModeVersion(c.indexVersionCreated())) { + return resolveStaticInstance(settingSourceMode); + } else { + return new SourceFieldMapper(settingSourceMode, Explicit.IMPLICIT_TRUE, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY, true); + } }, c -> new Builder( c.getIndexSettings().getMode(), c.getSettings(), - c.indexVersionCreated().onOrAfter(IndexVersions.SOURCE_MAPPER_LOSSY_PARAMS_CHECK) + c.indexVersionCreated().onOrAfter(IndexVersions.SOURCE_MAPPER_LOSSY_PARAMS_CHECK), + onOrAfterDeprecateModeVersion(c.indexVersionCreated()) == false ) ); @@ -330,8 +347,9 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { } } - // nullable for bwc reasons + // nullable for bwc reasons - TODO: fold this into serializeMode private final @Nullable Mode mode; + private final boolean serializeMode; private final Explicit enabled; /** indicates whether the source will always exist and be complete, for use by features like the update API */ @@ -341,7 +359,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { private final String[] excludes; private final SourceFilter sourceFilter; - private SourceFieldMapper(Mode mode, Explicit enabled, String[] includes, String[] excludes) { + private SourceFieldMapper(Mode mode, Explicit enabled, String[] includes, String[] excludes, boolean serializeMode) { super(new SourceFieldType((enabled.explicit() && enabled.value()) || (enabled.explicit() == false && mode != Mode.DISABLED))); this.mode = mode; this.enabled = enabled; @@ -349,6 +367,7 @@ private SourceFieldMapper(Mode mode, Explicit enabled, String[] include this.includes = includes; this.excludes = excludes; this.complete = stored() && sourceFilter == null; + this.serializeMode = serializeMode; } private static SourceFilter buildSourceFilter(String[] includes, String[] excludes) { @@ -419,7 +438,7 @@ protected String contentType() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(null, Settings.EMPTY, false).init(this); + return new Builder(null, Settings.EMPTY, false, serializeMode).init(this); } /** @@ -451,4 +470,9 @@ public boolean isDisabled() { public boolean isStored() { return mode == null || mode == Mode.STORED; } + + public static boolean onOrAfterDeprecateModeVersion(IndexVersion version) { + return version.onOrAfter(IndexVersions.DEPRECATE_SOURCE_MODE_MAPPER) + || version.between(IndexVersions.V8_DEPRECATE_SOURCE_MODE_MAPPER, IndexVersions.UPGRADE_TO_LUCENE_10_0_0); + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiDenseVectorScriptDocValues.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiDenseVectorScriptDocValues.java new file mode 100644 index 
0000000000000..a91960832239f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiDenseVectorScriptDocValues.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.index.mapper.vectors; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.fielddata.ScriptDocValues; +import org.elasticsearch.script.field.vectors.MultiDenseVector; + +import java.util.Iterator; + +public class MultiDenseVectorScriptDocValues extends ScriptDocValues { + + public static final String MISSING_VECTOR_FIELD_MESSAGE = "A document doesn't have a value for a multi-vector field!"; + + private final int dims; + protected final MultiDenseVectorSupplier dvSupplier; + + public MultiDenseVectorScriptDocValues(MultiDenseVectorSupplier supplier, int dims) { + super(supplier); + this.dvSupplier = supplier; + this.dims = dims; + } + + public int dims() { + return dims; + } + + private MultiDenseVector getCheckedVector() { + MultiDenseVector vector = dvSupplier.getInternal(); + if (vector == null) { + throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE); + } + return vector; + } + + /** + * Get multi-dense vector's value as an array of floats + */ + public Iterator getVectorValues() { + return getCheckedVector().getVectors(); + } + + /** + * Get dense vector's magnitude + */ + public float[] getMagnitudes() { + return getCheckedVector().getMagnitudes(); + } + + @Override + public BytesRef get(int index) { + throw new UnsupportedOperationException( + "accessing a multi-vector field's value through 'get' or 'value' is not supported, use 'vectorValues' or 'magnitudes' instead." 
+ ); + } + + @Override + public int size() { + MultiDenseVector mdv = dvSupplier.getInternal(); + if (mdv != null) { + return mdv.size(); + } + return 0; + } + + public interface MultiDenseVectorSupplier extends Supplier { + @Override + default BytesRef getInternal(int index) { + throw new UnsupportedOperationException(); + } + + MultiDenseVector getInternal(); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiVectorDVLeafFieldData.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiVectorDVLeafFieldData.java index cc6fb38274451..b9716d315f33a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiVectorDVLeafFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiVectorDVLeafFieldData.java @@ -9,37 +9,44 @@ package org.elasticsearch.index.mapper.vectors; +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReader; -import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.fielddata.LeafFieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.script.field.DocValuesScriptFieldFactory; +import org.elasticsearch.script.field.vectors.BitMultiDenseVectorDocValuesField; +import org.elasticsearch.script.field.vectors.ByteMultiDenseVectorDocValuesField; +import org.elasticsearch.script.field.vectors.FloatMultiDenseVectorDocValuesField; + +import java.io.IOException; final class MultiVectorDVLeafFieldData implements LeafFieldData { private final LeafReader reader; private final String field; - private final IndexVersion indexVersion; private final DenseVectorFieldMapper.ElementType elementType; private final int dims; - MultiVectorDVLeafFieldData( - LeafReader reader, - String field, - IndexVersion indexVersion, - DenseVectorFieldMapper.ElementType elementType, - int dims - ) { + MultiVectorDVLeafFieldData(LeafReader reader, String field, DenseVectorFieldMapper.ElementType elementType, int dims) { this.reader = reader; this.field = field; - this.indexVersion = indexVersion; this.elementType = elementType; this.dims = dims; } @Override public DocValuesScriptFieldFactory getScriptFieldFactory(String name) { - // TODO - return null; + try { + BinaryDocValues values = DocValues.getBinary(reader, field); + BinaryDocValues magnitudeValues = DocValues.getBinary(reader, field + MultiDenseVectorFieldMapper.VECTOR_MAGNITUDES_SUFFIX); + return switch (elementType) { + case BYTE -> new ByteMultiDenseVectorDocValuesField(values, magnitudeValues, name, elementType, dims); + case FLOAT -> new FloatMultiDenseVectorDocValuesField(values, magnitudeValues, name, elementType, dims); + case BIT -> new BitMultiDenseVectorDocValuesField(values, magnitudeValues, name, elementType, dims); + }; + } catch (IOException e) { + throw new IllegalStateException("Cannot load doc values for multi-vector field!", e); + } } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiVectorIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiVectorIndexFieldData.java index 65ef492ce052b..44a666e25a611 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiVectorIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/MultiVectorIndexFieldData.java @@ -55,7 +55,7 @@ public ValuesSourceType getValuesSourceType() { @Override public MultiVectorDVLeafFieldData load(LeafReaderContext 
context) { - return new MultiVectorDVLeafFieldData(context.reader(), fieldName, indexVersion, elementType, dims); + return new MultiVectorDVLeafFieldData(context.reader(), fieldName, elementType, dims); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/VectorEncoderDecoder.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/VectorEncoderDecoder.java index 9d09a7493d605..3db2d164846bd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/VectorEncoderDecoder.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/VectorEncoderDecoder.java @@ -84,4 +84,24 @@ public static void decodeDenseVector(IndexVersion indexVersion, BytesRef vectorB } } + public static float[] getMultiMagnitudes(BytesRef magnitudes) { + assert magnitudes.length % Float.BYTES == 0; + float[] multiMagnitudes = new float[magnitudes.length / Float.BYTES]; + ByteBuffer byteBuffer = ByteBuffer.wrap(magnitudes.bytes, magnitudes.offset, magnitudes.length).order(ByteOrder.LITTLE_ENDIAN); + for (int i = 0; i < magnitudes.length / Float.BYTES; i++) { + multiMagnitudes[i] = byteBuffer.getFloat(); + } + return multiMagnitudes; + } + + public static void decodeMultiDenseVector(BytesRef vectorBR, int numVectors, float[][] multiVectorValue) { + if (vectorBR == null) { + throw new IllegalArgumentException(MultiDenseVectorScriptDocValues.MISSING_VECTOR_FIELD_MESSAGE); + } + FloatBuffer fb = ByteBuffer.wrap(vectorBR.bytes, vectorBR.offset, vectorBR.length).order(ByteOrder.LITTLE_ENDIAN).asFloatBuffer(); + for (int i = 0; i < numVectors; i++) { + fb.get(multiVectorValue[i]); + } + } + } diff --git a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index 626875c75a5fe..83bca7d27aeeb 100644 --- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -112,6 +112,13 @@ public QueryBuilder query() { return query; } + /** + * Returns path to the searched nested object. + */ + public String path() { + return path; + } + /** * Returns inner hit definition in the scope of this query and reusing the defined type and query. 
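On the VectorEncoderDecoder additions above: getMultiMagnitudes assumes the magnitudes payload is a packed run of little-endian floats, one per vector, which is also what the magnitudes.length % Float.BYTES == 0 assertion enforces. For orientation, a hedged sketch of the matching encode side (this direction is not in the diff; it simply mirrors the decode loop):

    // One 4-byte little-endian float per vector, in vector order.
    static BytesRef encodeMultiMagnitudes(float[] magnitudes) {
        ByteBuffer buffer = ByteBuffer.allocate(magnitudes.length * Float.BYTES).order(ByteOrder.LITTLE_ENDIAN);
        for (float magnitude : magnitudes) {
            buffer.putFloat(magnitude);
        }
        return new BytesRef(buffer.array());
    }

With that encoding, getMultiMagnitudes(encodeMultiMagnitudes(values)) round-trips values exactly.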
*/ diff --git a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java index f1081d06d649d..9f6a2be8cdbc7 100644 --- a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java @@ -45,10 +45,6 @@ public class RangeQueryBuilder extends AbstractQueryBuilder i public static final ParseField LTE_FIELD = new ParseField("lte"); public static final ParseField GTE_FIELD = new ParseField("gte"); - public static final ParseField FROM_FIELD = new ParseField("from").withAllDeprecated(); - public static final ParseField TO_FIELD = new ParseField("to").withAllDeprecated(); - private static final ParseField INCLUDE_LOWER_FIELD = new ParseField("include_lower").withAllDeprecated(); - private static final ParseField INCLUDE_UPPER_FIELD = new ParseField("include_upper").withAllDeprecated(); public static final ParseField GT_FIELD = new ParseField("gt"); public static final ParseField LT_FIELD = new ParseField("lt"); private static final ParseField TIME_ZONE_FIELD = new ParseField("time_zone"); @@ -367,15 +363,7 @@ public static RangeQueryBuilder fromXContent(XContentParser parser) throws IOExc if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else { - if (FROM_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - from = maybeConvertToBytesRef(parser.objectBytes()); - } else if (TO_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - to = maybeConvertToBytesRef(parser.objectBytes()); - } else if (INCLUDE_LOWER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - includeLower = parser.booleanValue(); - } else if (INCLUDE_UPPER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - includeUpper = parser.booleanValue(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { boost = parser.floatValue(); } else if (GT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { from = maybeConvertToBytesRef(parser.objectBytes()); diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java index 70ba9950f7689..d8bd460f6f819 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java @@ -245,7 +245,11 @@ public ShardSnapshotResult getShardSnapshotResult() { } public void ensureNotAborted() { - switch (stage.get()) { + ensureNotAborted(stage.get()); + } + + public static void ensureNotAborted(Stage shardSnapshotStage) { + switch (shardSnapshotStage) { case ABORTED -> throw new AbortedSnapshotException(); case PAUSING -> throw new PausedSnapshotException(); } diff --git a/server/src/main/java/org/elasticsearch/index/stats/IndexingPressureStats.java b/server/src/main/java/org/elasticsearch/index/stats/IndexingPressureStats.java index b5197274dd519..0a56db56b2c95 100644 --- a/server/src/main/java/org/elasticsearch/index/stats/IndexingPressureStats.java +++ b/server/src/main/java/org/elasticsearch/index/stats/IndexingPressureStats.java @@ -36,6 +36,12 @@ public class IndexingPressureStats implements Writeable, ToXContentFragment { private 
final long primaryDocumentRejections; private final long memoryLimit; + /* Count number of splits due to SPLIT_BULK_LOW_WATERMARK and SPLIT_BULK_HIGH_WATERMARK + These 2 stats are not serialized via X content yet. + */ + private final long lowWaterMarkSplits; + private final long highWaterMarkSplits; + // These fields will be used for additional back-pressure and metrics in the future private final long totalCoordinatingOps; private final long totalCoordinatingRequests; @@ -85,6 +91,14 @@ public IndexingPressureStats(StreamInput in) throws IOException { } else { totalCoordinatingRequests = -1L; } + + if (in.getTransportVersion().onOrAfter(TransportVersions.INDEXING_PRESSURE_THROTTLING_STATS)) { + lowWaterMarkSplits = in.readVLong(); + highWaterMarkSplits = in.readVLong(); + } else { + lowWaterMarkSplits = -1L; + highWaterMarkSplits = -1L; + } } public IndexingPressureStats( @@ -107,7 +121,9 @@ public IndexingPressureStats( long currentPrimaryOps, long currentReplicaOps, long primaryDocumentRejections, - long totalCoordinatingRequests + long totalCoordinatingRequests, + long lowWaterMarkSplits, + long highWaterMarkSplits ) { this.totalCombinedCoordinatingAndPrimaryBytes = totalCombinedCoordinatingAndPrimaryBytes; this.totalCoordinatingBytes = totalCoordinatingBytes; @@ -131,6 +147,9 @@ public IndexingPressureStats( this.primaryDocumentRejections = primaryDocumentRejections; this.totalCoordinatingRequests = totalCoordinatingRequests; + + this.lowWaterMarkSplits = lowWaterMarkSplits; + this.highWaterMarkSplits = highWaterMarkSplits; } @Override @@ -160,6 +179,11 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) { out.writeVLong(totalCoordinatingRequests); } + + if (out.getTransportVersion().onOrAfter(TransportVersions.INDEXING_PRESSURE_THROTTLING_STATS)) { + out.writeVLong(lowWaterMarkSplits); + out.writeVLong(highWaterMarkSplits); + } } public long getTotalCombinedCoordinatingAndPrimaryBytes() { @@ -242,6 +266,14 @@ public long getTotalCoordinatingRequests() { return totalCoordinatingRequests; } + public long getHighWaterMarkSplits() { + return highWaterMarkSplits; + } + + public long getLowWaterMarkSplits() { + return lowWaterMarkSplits; + } + private static final String COMBINED = "combined_coordinating_and_primary"; private static final String COMBINED_IN_BYTES = "combined_coordinating_and_primary_in_bytes"; private static final String COORDINATING = "coordinating"; diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 887fe486b6003..e6b499c07f189 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -1217,14 +1217,14 @@ public static String digestToString(long digest) { * mechanism that is used in some repository plugins (S3 for example). However, the checksum is only calculated on * the first read. All consecutive reads of the same data are not used to calculate the checksum. 
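
A note on the IndexingPressureStats change above: the two new counters ride the usual transport-version gate, so a node receiving stats from an older sender reports the -1 sentinel instead of failing deserialization, and the read and write sides must stay symmetric. Condensed from the hunks above (field declarations and the enclosing class elided):

// read side: only consume the extra VLongs from senders that wrote them
if (in.getTransportVersion().onOrAfter(TransportVersions.INDEXING_PRESSURE_THROTTLING_STATS)) {
    lowWaterMarkSplits = in.readVLong();
    highWaterMarkSplits = in.readVLong();
} else {
    lowWaterMarkSplits = -1L;  // sentinel: stat not available from this node
    highWaterMarkSplits = -1L;
}

// write side mirrors the same version check, keeping the stream aligned
if (out.getTransportVersion().onOrAfter(TransportVersions.INDEXING_PRESSURE_THROTTLING_STATS)) {
    out.writeVLong(lowWaterMarkSplits);
    out.writeVLong(highWaterMarkSplits);
}
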
*/ - static class VerifyingIndexInput extends ChecksumIndexInput { + public static class VerifyingIndexInput extends ChecksumIndexInput { private final IndexInput input; private final Checksum digest; private final long checksumPosition; private final byte[] checksum = new byte[8]; private long verifiedPosition = 0; - VerifyingIndexInput(IndexInput input) { + public VerifyingIndexInput(IndexInput input) { this(input, new BufferedChecksum(new CRC32())); } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 3ac61bbca1a21..27d832241bfed 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.ResolvedIndices; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.mapping.put.TransportAutoPutMappingAction; -import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; @@ -79,7 +78,6 @@ import org.elasticsearch.env.ShardLock; import org.elasticsearch.env.ShardLockObtainFailedException; import org.elasticsearch.features.FeatureService; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.gateway.MetadataStateFormat; import org.elasticsearch.index.CloseUtils; @@ -211,8 +209,6 @@ public class IndicesService extends AbstractLifecycleComponent Setting.Property.NodeScope ); - static final NodeFeature SUPPORTS_AUTO_PUT = new NodeFeature("indices.auto_put_supported"); - /** * The node's settings. */ @@ -910,9 +906,7 @@ public void createShard( .setConcreteIndex(shardRouting.index()) .source(mapping.source().string(), XContentType.JSON); client.execute( - featureService.clusterHasFeature(clusterService.state(), SUPPORTS_AUTO_PUT) - ? 
TransportAutoPutMappingAction.TYPE - : TransportPutMappingAction.TYPE, + TransportAutoPutMappingAction.TYPE, putMappingRequestAcknowledgedRequest.ackTimeout(TimeValue.MAX_VALUE).masterNodeTimeout(TimeValue.MAX_VALUE), new RefCountAwareThreadedActionListener<>(threadPool.generic(), listener.map(ignored -> null)) ); diff --git a/server/src/main/java/org/elasticsearch/indices/SystemIndices.java b/server/src/main/java/org/elasticsearch/indices/SystemIndices.java index 856b30d1c19e8..42cda4da1a9e6 100644 --- a/server/src/main/java/org/elasticsearch/indices/SystemIndices.java +++ b/server/src/main/java/org/elasticsearch/indices/SystemIndices.java @@ -110,7 +110,7 @@ public class SystemIndices { public static final String SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY = "_system_index_access_allowed"; public static final String EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY = "_external_system_index_access_origin"; - public static final String UPGRADED_INDEX_SUFFIX = "-reindexed-for-8"; + public static final String UPGRADED_INDEX_SUFFIX = "-reindexed-for-9"; private static final Automaton EMPTY = Automata.makeEmpty(); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 308f1894b78db..c8d31d2060caf 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -397,6 +397,36 @@ record StartRecoveryRequestToSend(StartRecoveryRequest startRecoveryRequest, Str } indexShard.recoverLocallyUpToGlobalCheckpoint(ActionListener.assertOnce(l)); }) + // peer recovery can consume a lot of disk space, so it's worth cleaning up locally ahead of the attempt + // operation runs only if the previous operation succeeded, and returns the previous operation's result. + // Failures at this stage aren't fatal, we can attempt to recover and then clean up again at the end. #104473 + .andThenApply(startingSeqNo -> { + Store.MetadataSnapshot snapshot; + try { + snapshot = indexShard.snapshotStoreMetadata(); + } catch (IOException e) { + // We give up on the contents for any checked exception thrown by snapshotStoreMetadata. We don't want to + // allow those to bubble up and interrupt recovery because the subsequent recovery attempt is expected + // to fix up these problems for us if it completes successfully. 
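+ // (Triage sketch: IndexNotFoundException is the normal first-recovery case and only
+ // merits DEBUG logging; any other IOException is unexpected enough to WARN. Both paths
+ // fall back to an empty metadata snapshot, so the cleanup below effectively clears the
+ // index directory and lets the recovery repopulate it.)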
+ if (e instanceof org.apache.lucene.index.IndexNotFoundException) { + // this is the expected case on first recovery, so don't spam the logs with exceptions + logger.debug(() -> format("no snapshot found for shard %s, treating as empty", indexShard.shardId())); + } else { + logger.warn(() -> format("unable to load snapshot for shard %s, treating as empty", indexShard.shardId()), e); + } + snapshot = Store.MetadataSnapshot.EMPTY; + } + + Store store = indexShard.store(); + store.incRef(); + try { + logger.debug(() -> format("cleaning up index directory for %s before recovery", indexShard.shardId())); + store.cleanupAndVerify("cleanup before peer recovery", snapshot); + } finally { + store.decRef(); + } + return startingSeqNo; + }) // now construct the start-recovery request .andThenApply(startingSeqNo -> { assert startingSeqNo == UNASSIGNED_SEQ_NO || recoveryTarget.state().getStage() == RecoveryState.Stage.TRANSLOG diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceService.java b/server/src/main/java/org/elasticsearch/inference/InferenceService.java index c6e09f61befa0..4497254aad1f0 100644 --- a/server/src/main/java/org/elasticsearch/inference/InferenceService.java +++ b/server/src/main/java/org/elasticsearch/inference/InferenceService.java @@ -112,16 +112,11 @@ void infer( ); /** - * Chunk long text according to {@code chunkingOptions} or the - * model defaults if {@code chunkingOptions} contains unset - * values. - * * @param model The model * @param query Inference query, mainly for re-ranking * @param input Inference input * @param taskSettings Settings in the request to override the model's defaults * @param inputType For search, ingest etc - * @param chunkingOptions The window and span options to apply * @param timeout The timeout for the request * @param listener Chunked Inference result listener */ @@ -131,7 +126,6 @@ void chunkedInfer( List input, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ); diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index ce61f197b4831..1494d2a46f9d0 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -519,7 +519,7 @@ public static boolean isNoOpPipelineUpdate(ClusterState state, PutPipelineReques && currentIngestMetadata.getPipelines().containsKey(request.getId())) { var pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2(); var currentPipeline = currentIngestMetadata.getPipelines().get(request.getId()); - if (currentPipeline.getConfigAsMap().equals(pipelineConfig)) { + if (currentPipeline.getConfig().equals(pipelineConfig)) { return true; } } @@ -1292,7 +1292,7 @@ synchronized void innerUpdatePipelines(IngestMetadata newIngestMetadata) { try { Pipeline newPipeline = Pipeline.create( newConfiguration.getId(), - newConfiguration.getConfigAsMap(), + newConfiguration.getConfig(false), processorFactories, scriptService ); @@ -1416,7 +1416,7 @@ public
<P extends Processor>
Collection getPipelineWithProcessorType(Cla public synchronized void reloadPipeline(String id) throws Exception { PipelineHolder holder = pipelines.get(id); - Pipeline updatedPipeline = Pipeline.create(id, holder.configuration.getConfigAsMap(), processorFactories, scriptService); + Pipeline updatedPipeline = Pipeline.create(id, holder.configuration.getConfig(false), processorFactories, scriptService); Map updatedPipelines = new HashMap<>(this.pipelines); updatedPipelines.put(id, new PipelineHolder(holder.configuration, updatedPipeline)); this.pipelines = Map.copyOf(updatedPipelines); diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java index 9067cdb2040fd..64142caf4189d 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java @@ -9,12 +9,14 @@ package org.elasticsearch.ingest; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.ContextParser; import org.elasticsearch.xcontent.ObjectParser; @@ -22,26 +24,32 @@ import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; -import java.io.UncheckedIOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.Objects; /** - * Encapsulates a pipeline's id and configuration as a blob + * Encapsulates a pipeline's id and configuration as a loosely typed map -- see {@link Pipeline} for the + * parsed and processed object(s) that a pipeline configuration will become. This class is used for things + * like keeping track of pipelines in the cluster state (where a pipeline is 'just some json') whereas the + * {@link Pipeline} class is used in the actual processing of ingest documents through pipelines in the + * {@link IngestService}. 
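+ * <p>
+ * Note that pipeline parsing consumes a config map destructively -- entries are removed rather
+ * than read so that leftovers can be flagged as unused options -- which is why call sites that
+ * feed {@link Pipeline#create} request a mutable deep copy. Sketch only:
+ * <pre>{@code
+ * Map<String, Object> mutable = pipelineConfiguration.getConfig(false);
+ * Pipeline pipeline = Pipeline.create(id, mutable, processorFactories, scriptService);
+ * }</pre>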
*/ public final class PipelineConfiguration implements SimpleDiffable, ToXContentObject { private static final ObjectParser PARSER = new ObjectParser<>("pipeline_config", true, Builder::new); static { PARSER.declareString(Builder::setId, new ParseField("id")); - PARSER.declareField((parser, builder, aVoid) -> { - XContentBuilder contentBuilder = XContentBuilder.builder(parser.contentType().xContent()); - contentBuilder.generator().copyCurrentStructure(parser); - builder.setConfig(BytesReference.bytes(contentBuilder), contentBuilder.contentType()); - }, new ParseField("config"), ObjectParser.ValueType.OBJECT); - + PARSER.declareField( + (parser, builder, aVoid) -> builder.setConfig(parser.map()), + new ParseField("config"), + ObjectParser.ValueType.OBJECT + ); } public static ContextParser getParser() { @@ -51,56 +59,94 @@ public static ContextParser getParser() { private static class Builder { private String id; - private BytesReference config; - private XContentType xContentType; + private Map config; void setId(String id) { this.id = id; } - void setConfig(BytesReference config, XContentType xContentType) { + void setConfig(Map config) { this.config = config; - this.xContentType = xContentType; } PipelineConfiguration build() { - return new PipelineConfiguration(id, config, xContentType); + return new PipelineConfiguration(id, config); } } private final String id; - // Store config as bytes reference, because the config is only used when the pipeline store reads the cluster state - // and the way the map of maps config is read requires a deep copy (it removes instead of gets entries to check for unused options) - // also the get pipeline api just directly returns this to the caller - private final BytesReference config; - private final XContentType xContentType; + private final Map config; - public PipelineConfiguration(String id, BytesReference config, XContentType xContentType) { + public PipelineConfiguration(String id, Map config) { this.id = Objects.requireNonNull(id); - this.config = Objects.requireNonNull(config); - this.xContentType = Objects.requireNonNull(xContentType); + this.config = deepCopy(config, true); // defensive deep copy + } + + /** + * A convenience constructor that parses some bytes as a map representing a pipeline's config and then delegates to the + * conventional {@link #PipelineConfiguration(String, Map)} constructor. 
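+ * <p>
+ * For example (payload purely illustrative):
+ * <pre>{@code
+ * BytesReference source = new BytesArray("{\"description\":\"test\",\"processors\":[]}");
+ * PipelineConfiguration config = new PipelineConfiguration("my-pipeline", source, XContentType.JSON);
+ * }</pre>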
+ * + * @param id the id of the pipeline + * @param config a parse-able bytes reference that will return a pipeline configuration + * @param xContentType the content-type to use while parsing the pipeline configuration + */ + public PipelineConfiguration(String id, BytesReference config, XContentType xContentType) { + this(id, XContentHelper.convertToMap(config, true, xContentType).v2()); } public String getId() { return id; } - public Map getConfigAsMap() { - return XContentHelper.convertToMap(config, true, xContentType).v2(); + /** + * @return a reference to the unmodifiable configuration map for this pipeline + */ + public Map getConfig() { + return getConfig(true); } - // pkg-private for tests - XContentType getXContentType() { - return xContentType; + /** + * @param unmodifiable whether the returned map should be unmodifiable or not + * @return a reference to the unmodifiable config map (if unmodifiable is true) or + * a reference to a freshly-created mutable deep copy of the config map (if unmodifiable is false) + */ + public Map getConfig(boolean unmodifiable) { + if (unmodifiable) { + return config; // already unmodifiable + } else { + return deepCopy(config, false); + } + } + + @SuppressWarnings("unchecked") + private static T deepCopy(final T value, final boolean unmodifiable) { + return (T) innerDeepCopy(value, unmodifiable); } - // pkg-private for tests - BytesReference getConfig() { - return config; + private static Object innerDeepCopy(final Object value, final boolean unmodifiable) { + if (value instanceof Map mapValue) { + final Map copy = Maps.newLinkedHashMapWithExpectedSize(mapValue.size()); // n.b. maintain ordering + for (Map.Entry entry : mapValue.entrySet()) { + copy.put(innerDeepCopy(entry.getKey(), unmodifiable), innerDeepCopy(entry.getValue(), unmodifiable)); + } + return unmodifiable ? Collections.unmodifiableMap(copy) : copy; + } else if (value instanceof List listValue) { + final List copy = new ArrayList<>(listValue.size()); + for (Object itemValue : listValue) { + copy.add(innerDeepCopy(itemValue, unmodifiable)); + } + return unmodifiable ? 
Collections.unmodifiableList(copy) : copy; + } else { + // if this list of expected value types ends up not being exhaustive, then we want to learn about that + // at development time, but it's probably better to err on the side of passing through the value at runtime + assert (value == null || value instanceof String || value instanceof Number || value instanceof Boolean) + : "unexpected value type [" + value.getClass() + "]"; + return value; + } } public Integer getVersion() { - Object o = getConfigAsMap().get("version"); + Object o = config.get("version"); if (o == null) { return null; } else if (o instanceof Number number) { @@ -114,13 +160,22 @@ public Integer getVersion() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field("id", id); - builder.field("config", getConfigAsMap()); + builder.field("config", config); builder.endObject(); return builder; } public static PipelineConfiguration readFrom(StreamInput in) throws IOException { - return new PipelineConfiguration(in.readString(), in.readBytesReference(), in.readEnum(XContentType.class)); + final String id = in.readString(); + final Map config; + if (in.getTransportVersion().onOrAfter(TransportVersions.INGEST_PIPELINE_CONFIGURATION_AS_MAP)) { + config = in.readGenericMap(); + } else { + final BytesReference bytes = in.readSlicedBytesReference(); + final XContentType type = in.readEnum(XContentType.class); + config = XContentHelper.convertToMap(bytes, true, type).v2(); + } + return new PipelineConfiguration(id, config); } public static Diff readDiffFrom(StreamInput in) throws IOException { @@ -135,8 +190,14 @@ public String toString() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); - out.writeBytesReference(config); - XContentHelper.writeTo(out, xContentType); + if (out.getTransportVersion().onOrAfter(TransportVersions.INGEST_PIPELINE_CONFIGURATION_AS_MAP)) { + out.writeGenericMap(config); + } else { + XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent).prettyPrint(); + builder.map(config); + out.writeBytesReference(BytesReference.bytes(builder)); + XContentHelper.writeTo(out, XContentType.JSON); + } } @Override @@ -147,14 +208,14 @@ public boolean equals(Object o) { PipelineConfiguration that = (PipelineConfiguration) o; if (id.equals(that.id) == false) return false; - return getConfigAsMap().equals(that.getConfigAsMap()); + return config.equals(that.config); } @Override public int hashCode() { int result = id.hashCode(); - result = 31 * result + getConfigAsMap().hashCode(); + result = 31 * result + config.hashCode(); return result; } @@ -164,7 +225,7 @@ public int hashCode() { *
The given upgrader is applied to the config map for any processor of the given type. */ PipelineConfiguration maybeUpgradeProcessors(String type, IngestMetadata.ProcessorConfigUpgrader upgrader) { - Map mutableConfigMap = getConfigAsMap(); + Map mutableConfigMap = getConfig(false); boolean changed = false; // This should be a List of Maps, where the keys are processor types and the values are config maps. // But we'll skip upgrading rather than fail if not. @@ -180,11 +241,7 @@ PipelineConfiguration maybeUpgradeProcessors(String type, IngestMetadata.Process } } if (changed) { - try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { - return new PipelineConfiguration(id, BytesReference.bytes(builder.map(mutableConfigMap)), xContentType); - } catch (IOException e) { - throw new UncheckedIOException(e); - } + return new PipelineConfiguration(id, mutableConfigMap); } else { return this; } diff --git a/server/src/main/java/org/elasticsearch/internal/CompletionsPostingsFormatExtension.java b/server/src/main/java/org/elasticsearch/internal/CompletionsPostingsFormatExtension.java new file mode 100644 index 0000000000000..bb28d4dd6c901 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/internal/CompletionsPostingsFormatExtension.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.internal; + +import org.apache.lucene.search.suggest.document.CompletionPostingsFormat; + +/** + * Allows plugging-in the Completions Postings Format. + */ +public interface CompletionsPostingsFormatExtension { + + /** + * Returns the name of the {@link CompletionPostingsFormat} that Elasticsearch should use. Should return null if the extension + * is not enabled. + *
+ * Note that the name must match a codec that is available on all nodes in the cluster, otherwise IndexCorruptionExceptions will occur. + * A feature can be used to protect against this scenario, or alternatively, the codec code can be rolled out prior to its usage by this + * extension. + */ + String getFormatName(); +} diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java index b14ef171ccd1d..8c903fdc634d3 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java @@ -28,6 +28,7 @@ import java.io.IOException; import java.io.OutputStreamWriter; +import java.io.StringWriter; import java.io.Writer; import java.lang.management.ManagementFactory; import java.lang.management.ThreadInfo; @@ -105,6 +106,33 @@ public static void logLocalHotThreads(Logger logger, Level level, String prefix, } } + /** + * Capture and log the current threads on the local node. Unlike hot threads this does not sample and captures current state only. + * Useful for capturing stack traces for unexpectedly-slow operations in production. The resulting message might be large, so it is + * split per thread and logged as multiple entries. + * + * @param logger The logger to use for the logging + * @param level The log level to use for the logging. + * @param prefix The prefix to emit on each chunk of the logging. + */ + public static void logLocalCurrentThreads(Logger logger, Level level, String prefix) { + if (logger.isEnabled(level) == false) { + return; + } + + try (var writer = new StringWriter()) { + new HotThreads().busiestThreads(500).threadElementsSnapshotCount(1).detect(writer, () -> { + logger.log(level, "{}: {}", prefix, writer.toString()); + writer.getBuffer().setLength(0); + }); + } catch (Exception e) { + logger.error( + () -> org.elasticsearch.common.Strings.format("failed to write local current threads with prefix [%s]", prefix), + e + ); + } + } + public enum ReportType { CPU("cpu"), @@ -192,11 +220,12 @@ public HotThreads sortOrder(SortOrder order) { } public void detect(Writer writer) throws Exception { + detect(writer, () -> {}); + } + + public void detect(Writer writer, Runnable onNextThread) throws Exception { synchronized (mutex) { - innerDetect(ManagementFactory.getThreadMXBean(), SunThreadInfo.INSTANCE, Thread.currentThread().getId(), (interval) -> { - Thread.sleep(interval); - return null; - }, writer); + innerDetect(ManagementFactory.getThreadMXBean(), SunThreadInfo.INSTANCE, Thread.currentThread().getId(), writer, onNextThread); } } @@ -245,13 +274,15 @@ Map getAllValidThreadInfos(ThreadMXBean threadBean, ThreadInfo[][] captureThreadStacks(ThreadMXBean threadBean, long[] threadIds) throws InterruptedException { ThreadInfo[][] result = new ThreadInfo[threadElementsSnapshotCount][]; - for (int j = 0; j < threadElementsSnapshotCount; j++) { - // NOTE, javadoc of getThreadInfo says: If a thread of the given ID is not alive or does not exist, - // null will be set in the corresponding element in the returned array. A thread is alive if it has - // been started and has not yet died. + + // NOTE, javadoc of getThreadInfo says: If a thread of the given ID is not alive or does not exist, + // null will be set in the corresponding element in the returned array. A thread is alive if it has + // been started and has not yet died. 
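+ // Sampling pattern sketch, for threadElementsSnapshotCount == N: samples 1..N-1 each
+ // happen inside the loop, followed by the snapshot delay; the N-th sample is taken
+ // immediately after the loop, so there is no pointless sleep after the final capture.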
+ for (int j = 0; j < threadElementsSnapshotCount - 1; j++) { result[j] = threadBean.getThreadInfo(threadIds, Integer.MAX_VALUE); Thread.sleep(threadElementsSnapshotDelay.millis()); } + result[threadElementsSnapshotCount - 1] = threadBean.getThreadInfo(threadIds, Integer.MAX_VALUE); return result; } @@ -267,13 +298,8 @@ private double getTimeSharePercentage(long time) { return (((double) time) / interval.nanos()) * 100; } - void innerDetect( - ThreadMXBean threadBean, - SunThreadInfo sunThreadInfo, - long currentThreadId, - SleepFunction threadSleep, - Writer writer - ) throws Exception { + void innerDetect(ThreadMXBean threadBean, SunThreadInfo sunThreadInfo, long currentThreadId, Writer writer, Runnable onNextThread) + throws Exception { if (threadBean.isThreadCpuTimeSupported() == false) { throw new ElasticsearchException("thread CPU time is not supported on this JDK"); } @@ -297,10 +323,11 @@ void innerDetect( .append(", ignoreIdleThreads=") .append(Boolean.toString(ignoreIdleThreads)) .append(":\n"); + onNextThread.run(); // Capture before and after thread state with timings Map previousThreadInfos = getAllValidThreadInfos(threadBean, sunThreadInfo, currentThreadId); - threadSleep.apply(interval.millis()); + Thread.sleep(interval.millis()); Map latestThreadInfos = getAllValidThreadInfos(threadBean, sunThreadInfo, currentThreadId); latestThreadInfos.forEach((threadId, accumulator) -> accumulator.subtractPrevious(previousThreadInfos.get(threadId))); @@ -430,6 +457,7 @@ void innerDetect( } } } + onNextThread.run(); } } diff --git a/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java b/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java index acc26a42e4745..94395193622e0 100644 --- a/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java +++ b/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java @@ -643,6 +643,34 @@ private void registerAsyncMetrics(MeterRegistry registry) { ) ); + metrics.add( + registry.registerLongAsyncCounter( + "es.indexing.coordinating.low_watermark_splits.total", + "Total number of times bulk requests are split due to SPLIT_BULK_LOW_WATERMARK", + "operations", + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(NodeStats::getIndexingPressureStats) + .map(IndexingPressureStats::getLowWaterMarkSplits) + .orElse(0L) + ) + ) + ); + + metrics.add( + registry.registerLongAsyncCounter( + "es.indexing.coordinating.high_watermark_splits.total", + "Total number of times bulk requests are split due to SPLIT_BULK_HIGH_WATERMARK", + "operations", + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(NodeStats::getIndexingPressureStats) + .map(IndexingPressureStats::getHighWaterMarkSplits) + .orElse(0L) + ) + ) + ); + metrics.add( registry.registerLongAsyncCounter( "es.flush.total.time", diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index ec4a534fc883b..80c9aafaa84b4 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -69,6 +69,7 @@ import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.MetadataUpgrader; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsLoader; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.readiness.ReadinessService; import org.elasticsearch.repositories.RepositoriesService; @@ -180,8 +181,8 
@@ public class Node implements Closeable { * * @param environment the initial environment for this node, which will be added to by plugins */ - public Node(Environment environment) { - this(NodeConstruction.prepareConstruction(environment, new NodeServiceProvider(), true)); + public Node(Environment environment, PluginsLoader pluginsLoader) { + this(NodeConstruction.prepareConstruction(environment, pluginsLoader, new NodeServiceProvider(), true)); } /** diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index e8b9d18a1dd08..aec8eb0c3ca67 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -42,7 +42,6 @@ import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.coordination.MasterHistoryService; import org.elasticsearch.cluster.coordination.StableMasterHealthIndicatorService; -import org.elasticsearch.cluster.features.NodeFeaturesFixupListener; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.IndexMetadataVerifier; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -62,7 +61,6 @@ import org.elasticsearch.cluster.routing.allocation.DiskThresholdMonitor; import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.cluster.service.TransportVersionsFixupListener; import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.component.LifecycleComponent; @@ -166,6 +164,7 @@ import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.PersistentTaskPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsLoader; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.RecoveryPlannerPlugin; import org.elasticsearch.plugins.ReloadablePlugin; @@ -188,6 +187,7 @@ import org.elasticsearch.reservedstate.ReservedClusterStateHandlerProvider; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; import org.elasticsearch.reservedstate.service.FileSettingsService; +import org.elasticsearch.reservedstate.service.FileSettingsService.FileSettingsHealthIndicatorService; import org.elasticsearch.rest.action.search.SearchResponseMetrics; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; @@ -262,6 +262,7 @@ class NodeConstruction { */ static NodeConstruction prepareConstruction( Environment initialEnvironment, + PluginsLoader pluginsLoader, NodeServiceProvider serviceProvider, boolean forbidPrivateIndexSettings ) { @@ -269,7 +270,7 @@ static NodeConstruction prepareConstruction( try { NodeConstruction constructor = new NodeConstruction(closeables); - Settings settings = constructor.createEnvironment(initialEnvironment, serviceProvider); + Settings settings = constructor.createEnvironment(initialEnvironment, serviceProvider, pluginsLoader); constructor.loadLoggingDataProviders(); TelemetryProvider telemetryProvider = constructor.createTelemetryProvider(settings); ThreadPool threadPool = constructor.createThreadPool(settings, telemetryProvider.getMeterRegistry()); @@ -402,7 +403,7 @@ private static Optional getSinglePlugin(Stream plugins, Class plugi return 
Optional.of(plugin); } - private Settings createEnvironment(Environment initialEnvironment, NodeServiceProvider serviceProvider) { + private Settings createEnvironment(Environment initialEnvironment, NodeServiceProvider serviceProvider, PluginsLoader pluginsLoader) { // Pass the node settings to the DeprecationLogger class so that it can have the deprecation.skip_deprecated_settings setting: Settings envSettings = initialEnvironment.settings(); DeprecationLogger.initialize(envSettings); @@ -475,7 +476,7 @@ private Settings createEnvironment(Environment initialEnvironment, NodeServicePr (e, apmConfig) -> logger.error("failed to delete temporary APM config file [{}], reason: [{}]", apmConfig, e.getMessage()) ); - pluginsService = serviceProvider.newPluginService(initialEnvironment, envSettings); + pluginsService = serviceProvider.newPluginService(initialEnvironment, pluginsLoader); modules.bindToInstance(PluginsService.class, pluginsService); Settings settings = Node.mergePluginSettings(pluginsService.pluginMap(), envSettings); @@ -788,10 +789,6 @@ private void construct( if (DiscoveryNode.isMasterNode(settings)) { clusterService.addListener(new SystemIndexMappingUpdateService(systemIndices, client)); - clusterService.addListener( - new TransportVersionsFixupListener(clusterService, client.admin().cluster(), featureService, threadPool) - ); - clusterService.addListener(new NodeFeaturesFixupListener(clusterService, client.admin().cluster(), threadPool)); } SourceFieldMetrics sourceFieldMetrics = new SourceFieldMetrics( @@ -826,7 +823,7 @@ private void construct( .searchOperationListeners(searchOperationListeners) .build(); - final var parameters = new IndexSettingProvider.Parameters(indicesService::createIndexMapperServiceForValidation); + final var parameters = new IndexSettingProvider.Parameters(clusterService, indicesService::createIndexMapperServiceForValidation); IndexSettingProviders indexSettingProviders = new IndexSettingProviders( Sets.union( builtinIndexSettingProviders(), @@ -924,6 +921,9 @@ private void construct( final IndexingPressure indexingLimits = new IndexingPressure(settings); final IncrementalBulkService incrementalBulkService = new IncrementalBulkService(client, indexingLimits); + final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService); + modules.bindToInstance(ResponseCollectorService.class, responseCollectorService); + ActionModule actionModule = new ActionModule( settings, clusterModule.getIndexNameExpressionResolver(), @@ -1006,7 +1006,6 @@ private void construct( taskManager, telemetryProvider.getTracer() ); - final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService); final SearchResponseMetrics searchResponseMetrics = new SearchResponseMetrics(telemetryProvider.getMeterRegistry()); final SearchTransportService searchTransportService = new SearchTransportService( transportService, @@ -1036,10 +1035,12 @@ private void construct( actionModule.getReservedClusterStateService().installStateHandler(new ReservedRepositoryAction(repositoriesService)); actionModule.getReservedClusterStateService().installStateHandler(new ReservedPipelineAction()); + FileSettingsHealthIndicatorService fileSettingsHealthIndicatorService = new FileSettingsHealthIndicatorService(); FileSettingsService fileSettingsService = new FileSettingsService( clusterService, actionModule.getReservedClusterStateService(), - environment + environment, + fileSettingsHealthIndicatorService ); RestoreService 
restoreService = new RestoreService( @@ -1100,9 +1101,7 @@ private void construct( threadPool, scriptService, bigArrays, - searchModule.getRankFeatureShardPhase(), searchModule.getFetchPhase(), - responseCollectorService, circuitBreakerService, systemIndices.getExecutorSelector(), telemetryProvider.getTracer() @@ -1133,7 +1132,8 @@ private void construct( featureService, threadPool, telemetryProvider, - repositoriesService + repositoriesService, + fileSettingsHealthIndicatorService ) ); @@ -1305,7 +1305,8 @@ private Module loadDiagnosticServices( FeatureService featureService, ThreadPool threadPool, TelemetryProvider telemetryProvider, - RepositoriesService repositoriesService + RepositoriesService repositoriesService, + FileSettingsHealthIndicatorService fileSettingsHealthIndicatorService ) { MasterHistoryService masterHistoryService = new MasterHistoryService(transportService, threadPool, clusterService); @@ -1320,7 +1321,8 @@ private Module loadDiagnosticServices( new StableMasterHealthIndicatorService(coordinationDiagnosticsService, clusterService), new RepositoryIntegrityHealthIndicatorService(clusterService, featureService), new DiskHealthIndicatorService(clusterService, featureService), - new ShardsCapacityHealthIndicatorService(clusterService, featureService) + new ShardsCapacityHealthIndicatorService(clusterService, featureService), + fileSettingsHealthIndicatorService ); var pluginHealthIndicatorServices = pluginsService.filterPlugins(HealthPlugin.class) .flatMap(plugin -> plugin.getHealthIndicatorServices().stream()); diff --git a/server/src/main/java/org/elasticsearch/node/NodeServiceProvider.java b/server/src/main/java/org/elasticsearch/node/NodeServiceProvider.java index f18655afb8f02..4b7524a7ac011 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeServiceProvider.java +++ b/server/src/main/java/org/elasticsearch/node/NodeServiceProvider.java @@ -27,6 +27,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.plugins.PluginsLoader; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.readiness.ReadinessService; import org.elasticsearch.script.ScriptContext; @@ -34,7 +35,6 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.fetch.FetchPhase; -import org.elasticsearch.search.rank.feature.RankFeatureShardPhase; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; @@ -51,9 +51,9 @@ */ class NodeServiceProvider { - PluginsService newPluginService(Environment environment, Settings settings) { + PluginsService newPluginService(Environment initialEnvironment, PluginsLoader pluginsLoader) { // this creates a PluginsService with an empty list of classpath plugins - return new PluginsService(settings, environment.configFile(), environment.modulesFile(), environment.pluginsFile()); + return new PluginsService(initialEnvironment.settings(), initialEnvironment.configFile(), pluginsLoader); } ScriptService newScriptService( @@ -118,9 +118,7 @@ SearchService newSearchService( ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays, - RankFeatureShardPhase rankFeatureShardPhase, FetchPhase fetchPhase, - ResponseCollectorService responseCollectorService, CircuitBreakerService circuitBreakerService, ExecutorSelector executorSelector, 
Tracer tracer @@ -131,9 +129,7 @@ SearchService newSearchService( threadPool, scriptService, bigArrays, - rankFeatureShardPhase, fetchPhase, - responseCollectorService, circuitBreakerService, executorSelector, tracer diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsLoader.java b/server/src/main/java/org/elasticsearch/plugins/PluginsLoader.java new file mode 100644 index 0000000000000..aa21e5c64d903 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/plugins/PluginsLoader.java @@ -0,0 +1,480 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.plugins; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.core.PathUtils; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.jdk.JarHell; +import org.elasticsearch.jdk.ModuleQualifiedExportsService; + +import java.io.IOException; +import java.lang.ModuleLayer.Controller; +import java.lang.module.Configuration; +import java.lang.module.ModuleFinder; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.net.URLClassLoader; +import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Stream; + +import static org.elasticsearch.common.io.FileSystemUtils.isAccessibleDirectory; +import static org.elasticsearch.jdk.ModuleQualifiedExportsService.addExportsService; +import static org.elasticsearch.jdk.ModuleQualifiedExportsService.exposeQualifiedExportsAndOpens; + +/** + * This class is used to load modules and module layers for each plugin during + * node initialization prior to enablement of entitlements. This allows entitlements + * to have all the plugin information they need prior to starting. 
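+ * <p>
+ * Boot-time usage sketch, assembled from the call sites in this change (illustrative only):
+ * <pre>{@code
+ * PluginsLoader pluginsLoader = PluginsLoader.createPluginsLoader(env.modulesFile(), env.pluginsFile());
+ * Node node = new Node(env, pluginsLoader);  // NodeConstruction hands the loader to PluginsService
+ * // or, where the service is created directly:
+ * PluginsService pluginsService = new PluginsService(settings, env.configFile(), pluginsLoader);
+ * }</pre>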
+ */ +public class PluginsLoader { + + /** + * Contains information about the {@link ClassLoader} required to load a plugin + */ + public interface PluginLayer { + /** + * @return Information about the bundle of jars used in this plugin + */ + PluginBundle pluginBundle(); + + /** + * @return The {@link ClassLoader} used to instantiate the main class for the plugin + */ + ClassLoader pluginClassLoader(); + } + + /** + * Contains information about the {@link ClassLoader}s and {@link ModuleLayer} required for loading a plugin + * @param pluginBundle Information about the bundle of jars used in this plugin + * @param pluginClassLoader The {@link ClassLoader} used to instantiate the main class for the plugin + * @param spiClassLoader The exported {@link ClassLoader} visible to other Java modules + * @param spiModuleLayer The exported {@link ModuleLayer} visible to other Java modules + */ + private record LoadedPluginLayer( + PluginBundle pluginBundle, + ClassLoader pluginClassLoader, + ClassLoader spiClassLoader, + ModuleLayer spiModuleLayer + ) implements PluginLayer { + + public LoadedPluginLayer { + Objects.requireNonNull(pluginBundle); + Objects.requireNonNull(pluginClassLoader); + Objects.requireNonNull(spiClassLoader); + Objects.requireNonNull(spiModuleLayer); + } + } + + /** + * Tuple of module layer and loader. + * Modular Plugins have a plugin specific loader and layer. + * Non-Modular plugins have a plugin specific loader and the boot layer. + */ + public record LayerAndLoader(ModuleLayer layer, ClassLoader loader) { + + public LayerAndLoader { + Objects.requireNonNull(layer); + Objects.requireNonNull(loader); + } + + public static LayerAndLoader ofLoader(ClassLoader loader) { + return new LayerAndLoader(ModuleLayer.boot(), loader); + } + } + + private static final Logger logger = LogManager.getLogger(PluginsLoader.class); + private static final Module serverModule = PluginsLoader.class.getModule(); + + private final List moduleDescriptors; + private final List pluginDescriptors; + private final Map loadedPluginLayers; + + /** + * Constructs a new PluginsLoader + * + * @param modulesDirectory The directory modules exist in, or null if modules should not be loaded from the filesystem + * @param pluginsDirectory The directory plugins exist in, or null if plugins should not be loaded from the filesystem + */ + public static PluginsLoader createPluginsLoader(Path modulesDirectory, Path pluginsDirectory) { + return createPluginsLoader(modulesDirectory, pluginsDirectory, true); + } + + /** + * Constructs a new PluginsLoader + * + * @param modulesDirectory The directory modules exist in, or null if modules should not be loaded from the filesystem + * @param pluginsDirectory The directory plugins exist in, or null if plugins should not be loaded from the filesystem + * @param withServerExports {@code true} to add server module exports + */ + public static PluginsLoader createPluginsLoader(Path modulesDirectory, Path pluginsDirectory, boolean withServerExports) { + Map> qualifiedExports; + if (withServerExports) { + qualifiedExports = new HashMap<>(ModuleQualifiedExportsService.getBootServices()); + addServerExportsService(qualifiedExports); + } else { + qualifiedExports = Collections.emptyMap(); + } + + Set seenBundles = new LinkedHashSet<>(); + + // load (elasticsearch) module layers + List moduleDescriptors; + if (modulesDirectory != null) { + try { + Set modules = PluginsUtils.getModuleBundles(modulesDirectory); + moduleDescriptors = 
modules.stream().map(PluginBundle::pluginDescriptor).toList(); + seenBundles.addAll(modules); + } catch (IOException ex) { + throw new IllegalStateException("Unable to initialize modules", ex); + } + } else { + moduleDescriptors = Collections.emptyList(); + } + + // load plugin layers + List pluginDescriptors; + if (pluginsDirectory != null) { + try { + // TODO: remove this leniency, but tests bogusly rely on it + if (isAccessibleDirectory(pluginsDirectory, logger)) { + PluginsUtils.checkForFailedPluginRemovals(pluginsDirectory); + Set plugins = PluginsUtils.getPluginBundles(pluginsDirectory); + pluginDescriptors = plugins.stream().map(PluginBundle::pluginDescriptor).toList(); + seenBundles.addAll(plugins); + } else { + pluginDescriptors = Collections.emptyList(); + } + } catch (IOException ex) { + throw new IllegalStateException("Unable to initialize plugins", ex); + } + } else { + pluginDescriptors = Collections.emptyList(); + } + + Map loadedPluginLayers = new LinkedHashMap<>(); + Map> transitiveUrls = new HashMap<>(); + List sortedBundles = PluginsUtils.sortBundles(seenBundles); + if (sortedBundles.isEmpty() == false) { + Set systemLoaderURLs = JarHell.parseModulesAndClassPath(); + for (PluginBundle bundle : sortedBundles) { + PluginsUtils.checkBundleJarHell(systemLoaderURLs, bundle, transitiveUrls); + loadPluginLayer(bundle, loadedPluginLayers, qualifiedExports); + } + } + + return new PluginsLoader(moduleDescriptors, pluginDescriptors, loadedPluginLayers); + } + + PluginsLoader( + List moduleDescriptors, + List pluginDescriptors, + Map loadedPluginLayers + ) { + this.moduleDescriptors = moduleDescriptors; + this.pluginDescriptors = pluginDescriptors; + this.loadedPluginLayers = loadedPluginLayers; + } + + public List moduleDescriptors() { + return moduleDescriptors; + } + + public List pluginDescriptors() { + return pluginDescriptors; + } + + public Stream pluginLayers() { + return loadedPluginLayers.values().stream().map(Function.identity()); + } + + private static void loadPluginLayer( + PluginBundle bundle, + Map loaded, + Map> qualifiedExports + ) { + String name = bundle.plugin.getName(); + logger.debug(() -> "Loading bundle: " + name); + + PluginsUtils.verifyCompatibility(bundle.plugin); + + // collect the list of extended plugins + List extendedPlugins = new ArrayList<>(); + for (String extendedPluginName : bundle.plugin.getExtendedPlugins()) { + LoadedPluginLayer extendedPlugin = loaded.get(extendedPluginName); + assert extendedPlugin != null; + assert extendedPlugin.spiClassLoader() != null : "All non-classpath plugins should be loaded with a classloader"; + extendedPlugins.add(extendedPlugin); + } + + final ClassLoader parentLoader = ExtendedPluginsClassLoader.create( + PluginsLoader.class.getClassLoader(), + extendedPlugins.stream().map(LoadedPluginLayer::spiClassLoader).toList() + ); + LayerAndLoader spiLayerAndLoader = null; + if (bundle.hasSPI()) { + spiLayerAndLoader = createSPI(bundle, parentLoader, extendedPlugins, qualifiedExports); + } + + final ClassLoader pluginParentLoader = spiLayerAndLoader == null ? 
parentLoader : spiLayerAndLoader.loader(); + final LayerAndLoader pluginLayerAndLoader = createPlugin( + bundle, + pluginParentLoader, + extendedPlugins, + spiLayerAndLoader, + qualifiedExports + ); + final ClassLoader pluginClassLoader = pluginLayerAndLoader.loader(); + + if (spiLayerAndLoader == null) { + // use full implementation for plugins extending this one + spiLayerAndLoader = pluginLayerAndLoader; + } + + loaded.put(name, new LoadedPluginLayer(bundle, pluginClassLoader, spiLayerAndLoader.loader, spiLayerAndLoader.layer)); + } + + static LayerAndLoader createSPI( + PluginBundle bundle, + ClassLoader parentLoader, + List extendedPlugins, + Map> qualifiedExports + ) { + final PluginDescriptor plugin = bundle.plugin; + if (plugin.getModuleName().isPresent()) { + logger.debug(() -> "Loading bundle: " + plugin.getName() + ", creating spi, modular"); + return createSpiModuleLayer( + bundle.spiUrls, + parentLoader, + extendedPlugins.stream().map(LoadedPluginLayer::spiModuleLayer).toList(), + qualifiedExports + ); + } else { + logger.debug(() -> "Loading bundle: " + plugin.getName() + ", creating spi, non-modular"); + return LayerAndLoader.ofLoader(URLClassLoader.newInstance(bundle.spiUrls.toArray(new URL[0]), parentLoader)); + } + } + + static LayerAndLoader createPlugin( + PluginBundle bundle, + ClassLoader pluginParentLoader, + List extendedPlugins, + LayerAndLoader spiLayerAndLoader, + Map> qualifiedExports + ) { + final PluginDescriptor plugin = bundle.plugin; + if (plugin.getModuleName().isPresent()) { + logger.debug(() -> "Loading bundle: " + plugin.getName() + ", modular"); + var parentLayers = Stream.concat( + Stream.ofNullable(spiLayerAndLoader != null ? spiLayerAndLoader.layer() : null), + extendedPlugins.stream().map(LoadedPluginLayer::spiModuleLayer) + ).toList(); + return createPluginModuleLayer(bundle, pluginParentLoader, parentLayers, qualifiedExports); + } else if (plugin.isStable()) { + logger.debug(() -> "Loading bundle: " + plugin.getName() + ", non-modular as synthetic module"); + return LayerAndLoader.ofLoader( + UberModuleClassLoader.getInstance( + pluginParentLoader, + ModuleLayer.boot(), + "synthetic." 
+ toModuleName(plugin.getName()), + bundle.allUrls, + Set.of("org.elasticsearch.server") // TODO: instead of denying server, allow only jvm + stable API modules + ) + ); + } else { + logger.debug(() -> "Loading bundle: " + plugin.getName() + ", non-modular"); + return LayerAndLoader.ofLoader(URLClassLoader.newInstance(bundle.urls.toArray(URL[]::new), pluginParentLoader)); + } + } + + static LayerAndLoader createSpiModuleLayer( + Set urls, + ClassLoader parentLoader, + List parentLayers, + Map> qualifiedExports + ) { + // assert bundle.plugin.getModuleName().isPresent(); + return createModuleLayer( + null, // no entry point + spiModuleName(urls), + urlsToPaths(urls), + parentLoader, + parentLayers, + qualifiedExports + ); + } + + static LayerAndLoader createPluginModuleLayer( + PluginBundle bundle, + ClassLoader parentLoader, + List parentLayers, + Map> qualifiedExports + ) { + assert bundle.plugin.getModuleName().isPresent(); + return createModuleLayer( + bundle.plugin.getClassname(), + bundle.plugin.getModuleName().get(), + urlsToPaths(bundle.urls), + parentLoader, + parentLayers, + qualifiedExports + ); + } + + static LayerAndLoader createModuleLayer( + String className, + String moduleName, + Path[] paths, + ClassLoader parentLoader, + List parentLayers, + Map> qualifiedExports + ) { + logger.debug(() -> "Loading bundle: creating module layer and loader for module " + moduleName); + var finder = ModuleFinder.of(paths); + + var configuration = Configuration.resolveAndBind( + ModuleFinder.of(), + parentConfigurationOrBoot(parentLayers), + finder, + Set.of(moduleName) + ); + var controller = privilegedDefineModulesWithOneLoader(configuration, parentLayersOrBoot(parentLayers), parentLoader); + var pluginModule = controller.layer().findModule(moduleName).get(); + ensureEntryPointAccessible(controller, pluginModule, className); + // export/open upstream modules to this plugin module + exposeQualifiedExportsAndOpens(pluginModule, qualifiedExports); + // configure qualified exports/opens to other modules/plugins + addPluginExportsServices(qualifiedExports, controller); + logger.debug(() -> "Loading bundle: created module layer and loader for module " + moduleName); + return new LayerAndLoader(controller.layer(), privilegedFindLoader(controller.layer(), moduleName)); + } + + /** Determines the module name of the SPI module, given its URL. 
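+ * Exactly one resolvable module is expected on the given path; its descriptor supplies the name.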
*/ + static String spiModuleName(Set spiURLS) { + ModuleFinder finder = ModuleFinder.of(urlsToPaths(spiURLS)); + var mrefs = finder.findAll(); + assert mrefs.size() == 1 : "Expected a single module, got:" + mrefs; + return mrefs.stream().findFirst().get().descriptor().name(); + } + + // package-visible for testing + static String toModuleName(String name) { + String result = name.replaceAll("\\W+", ".") // replace non-alphanumeric character strings with dots + .replaceAll("(^[^A-Za-z_]*)", "") // trim non-alpha or underscore characters from start + .replaceAll("\\.$", "") // trim trailing dot + .toLowerCase(Locale.getDefault()); + assert ModuleSupport.isPackageName(result); + return result; + } + + static final String toPackageName(String className) { + assert className.endsWith(".") == false; + int index = className.lastIndexOf('.'); + if (index == -1) { + throw new IllegalStateException("invalid class name:" + className); + } + return className.substring(0, index); + } + + @SuppressForbidden(reason = "I need to convert URL's to Paths") + static final Path[] urlsToPaths(Set urls) { + return urls.stream().map(PluginsLoader::uncheckedToURI).map(PathUtils::get).toArray(Path[]::new); + } + + static final URI uncheckedToURI(URL url) { + try { + return url.toURI(); + } catch (URISyntaxException e) { + throw new AssertionError(new IOException(e)); + } + } + + private static List parentConfigurationOrBoot(List parentLayers) { + if (parentLayers == null || parentLayers.isEmpty()) { + return List.of(ModuleLayer.boot().configuration()); + } else { + return parentLayers.stream().map(ModuleLayer::configuration).toList(); + } + } + + /** Ensures that the plugins main class (its entry point), if any, is accessible to the server. */ + private static void ensureEntryPointAccessible(Controller controller, Module pluginModule, String className) { + if (className != null) { + controller.addOpens(pluginModule, toPackageName(className), serverModule); + } + } + + @SuppressWarnings("removal") + static Controller privilegedDefineModulesWithOneLoader(Configuration cf, List parentLayers, ClassLoader parentLoader) { + return AccessController.doPrivileged( + (PrivilegedAction) () -> ModuleLayer.defineModulesWithOneLoader(cf, parentLayers, parentLoader) + ); + } + + @SuppressWarnings("removal") + static ClassLoader privilegedFindLoader(ModuleLayer layer, String name) { + return AccessController.doPrivileged((PrivilegedAction) () -> layer.findLoader(name)); + } + + private static List parentLayersOrBoot(List parentLayers) { + if (parentLayers == null || parentLayers.isEmpty()) { + return List.of(ModuleLayer.boot()); + } else { + return parentLayers; + } + } + + private static void addServerExportsService(Map> qualifiedExports) { + var exportsService = new ModuleQualifiedExportsService(serverModule) { + @Override + protected void addExports(String pkg, Module target) { + serverModule.addExports(pkg, target); + } + + @Override + protected void addOpens(String pkg, Module target) { + serverModule.addOpens(pkg, target); + } + }; + addExportsService(qualifiedExports, exportsService, serverModule.getName()); + } + + private static void addPluginExportsServices(Map> qualifiedExports, Controller controller) { + for (Module module : controller.layer().modules()) { + var exportsService = new ModuleQualifiedExportsService(module) { + @Override + protected void addExports(String pkg, Module target) { + controller.addExports(module, pkg, target); + } + + @Override + protected void addOpens(String pkg, Module target) { + 
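+ // route through the layer Controller, which is permitted to add qualified
+ // opens for modules in its own layer (the module itself could not from outside)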
+    private static void addPluginExportsServices(Map<String, List<ModuleQualifiedExportsService>> qualifiedExports, Controller controller) {
+        for (Module module : controller.layer().modules()) {
+            var exportsService = new ModuleQualifiedExportsService(module) {
+                @Override
+                protected void addExports(String pkg, Module target) {
+                    controller.addExports(module, pkg, target);
+                }
+
+                @Override
+                protected void addOpens(String pkg, Module target) {
+                    controller.addOpens(module, pkg, target);
+                }
+            };
+            addExportsService(qualifiedExports, exportsService, module.getName());
+        }
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java
index d5dd6d62d615e..6ef3cd17ba2e9 100644
--- a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java
+++ b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java
@@ -23,34 +23,22 @@ import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.set.Sets;
-import org.elasticsearch.core.PathUtils;
-import org.elasticsearch.core.SuppressForbidden;
 import org.elasticsearch.core.Tuple;
-import org.elasticsearch.jdk.JarHell;
-import org.elasticsearch.jdk.ModuleQualifiedExportsService;
 import org.elasticsearch.node.ReportingService;
+import org.elasticsearch.plugins.PluginsLoader.PluginLayer;
 import org.elasticsearch.plugins.scanners.StablePluginsRegistry;
 import org.elasticsearch.plugins.spi.SPIClassIterator;

 import java.io.IOException;
-import java.lang.ModuleLayer.Controller;
-import java.lang.module.Configuration;
-import java.lang.module.ModuleFinder;
 import java.lang.reflect.Constructor;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.net.URL;
-import java.net.URLClassLoader;
 import java.nio.file.Path;
 import java.security.AccessController;
 import java.security.PrivilegedAction;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -63,10 +51,6 @@ import java.util.stream.Collectors;
 import java.util.stream.Stream;

-import static org.elasticsearch.common.io.FileSystemUtils.isAccessibleDirectory;
-import static org.elasticsearch.jdk.ModuleQualifiedExportsService.addExportsService;
-import static org.elasticsearch.jdk.ModuleQualifiedExportsService.exposeQualifiedExportsAndOpens;
-
 public class PluginsService implements ReportingService<PluginsAndModules> {

     public StablePluginsRegistry getStablePluginRegistry() {
@@ -77,33 +61,18 @@ public StablePluginsRegistry getStablePluginRegistry() {
      * A loaded plugin is one for which Elasticsearch has successfully constructed an instance of the plugin's class
      * @param descriptor Metadata about the plugin, usually loaded from plugin properties
      * @param instance The constructed instance of the plugin's main class
-     * @param loader The classloader for the plugin
-     * @param layer The module layer for the plugin
      */
-    record LoadedPlugin(PluginDescriptor descriptor, Plugin instance, ClassLoader loader, ModuleLayer layer) {
+    record LoadedPlugin(PluginDescriptor descriptor, Plugin instance, ClassLoader classLoader) {

         LoadedPlugin {
             Objects.requireNonNull(descriptor);
             Objects.requireNonNull(instance);
-            Objects.requireNonNull(loader);
-            Objects.requireNonNull(layer);
-        }
-
-        /**
-         * Creates a loaded classpath plugin. A classpath plugin is a plugin loaded
-         * by the system classloader and defined to the unnamed module of the boot layer.
-         */
-        LoadedPlugin(PluginDescriptor descriptor, Plugin instance) {
-            this(descriptor, instance, PluginsService.class.getClassLoader(), ModuleLayer.boot());
         }
     }
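For orientation, the hand-off between the two classes looks like this from a caller's point of view; a minimal sketch against the reworked constructor shown below (PluginBootstrapSketch is hypothetical, and how the PluginsLoader instance itself is built is not part of this excerpt):

    import java.nio.file.Path;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.plugins.PluginsLoader;
    import org.elasticsearch.plugins.PluginsService;

    // Sketch: classloader/module-layer work now lives in PluginsLoader;
    // PluginsService only instantiates plugin classes from the prepared layers.
    class PluginBootstrapSketch {
        static PluginsService createPluginsService(Settings settings, Path configPath, PluginsLoader pluginsLoader) {
            return new PluginsService(settings, configPath, pluginsLoader);
        }
    }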

     private static final Logger logger = LogManager.getLogger(PluginsService.class);
     private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(PluginsService.class);

-    private final Settings settings;
-    private final Path configPath;
-
     /**
      * We keep around a list of plugins and modules. The order of
      * this list is that which the plugins and modules were loaded in.
@@ -117,69 +86,32 @@ record LoadedPlugin(PluginDescriptor descriptor, Plugin instance, ClassLoader lo
     /**
      * Constructs a new PluginService
      *
-     * @param settings The settings of the system
-     * @param modulesDirectory The directory modules exist in, or null if modules should not be loaded from the filesystem
-     * @param pluginsDirectory The directory plugins exist in, or null if plugins should not be loaded from the filesystem
+     * @param settings The settings for this node
+     * @param configPath The configuration path for this node
+     * @param pluginsLoader the information required to complete loading of plugins
      */
-    @SuppressWarnings("this-escape")
-    public PluginsService(Settings settings, Path configPath, Path modulesDirectory, Path pluginsDirectory) {
-        this.settings = settings;
-        this.configPath = configPath;
-
-        Map<String, List<ModuleQualifiedExportsService>> qualifiedExports = new HashMap<>(ModuleQualifiedExportsService.getBootServices());
-        addServerExportsService(qualifiedExports);
-
-        Set<PluginBundle> seenBundles = new LinkedHashSet<>();
-
-        // load modules
-        List<PluginDescriptor> modulesList = new ArrayList<>();
-        Set<String> moduleNameList = new HashSet<>();
-        if (modulesDirectory != null) {
-            try {
-                Set<PluginBundle> modules = PluginsUtils.getModuleBundles(modulesDirectory);
-                modules.stream().map(PluginBundle::pluginDescriptor).forEach(m -> {
-                    modulesList.add(m);
-                    moduleNameList.add(m.getName());
-                });
-                seenBundles.addAll(modules);
-            } catch (IOException ex) {
-                throw new IllegalStateException("Unable to initialize modules", ex);
-            }
-        }
+    public PluginsService(Settings settings, Path configPath, PluginsLoader pluginsLoader) {
+        Map<String, LoadedPlugin> loadedPlugins = loadPluginBundles(settings, configPath, pluginsLoader);

-        // load plugins
-        List<PluginDescriptor> pluginsList = new ArrayList<>();
-        if (pluginsDirectory != null) {
-            try {
-                // TODO: remove this leniency, but tests bogusly rely on it
-                if (isAccessibleDirectory(pluginsDirectory, logger)) {
-                    PluginsUtils.checkForFailedPluginRemovals(pluginsDirectory);
-                    Set<PluginBundle> plugins = PluginsUtils.getPluginBundles(pluginsDirectory);
-                    plugins.stream().map(PluginBundle::pluginDescriptor).forEach(pluginsList::add);
-                    seenBundles.addAll(plugins);
-                }
-            } catch (IOException ex) {
-                throw new IllegalStateException("Unable to initialize plugins", ex);
-            }
-        }
-
-        LinkedHashMap<String, LoadedPlugin> loadedPlugins = loadBundles(seenBundles, qualifiedExports);
+        var modulesDescriptors = pluginsLoader.moduleDescriptors();
+        var pluginDescriptors = pluginsLoader.pluginDescriptors();

         var inspector = PluginIntrospector.getInstance();
-        this.info = new PluginsAndModules(getRuntimeInfos(inspector, pluginsList, loadedPlugins), modulesList);
+        this.info = new PluginsAndModules(getRuntimeInfos(inspector, pluginDescriptors, loadedPlugins), modulesDescriptors);
         this.plugins = List.copyOf(loadedPlugins.values());

-        checkDeprecations(inspector, pluginsList, loadedPlugins);
+        checkDeprecations(inspector, pluginDescriptors, loadedPlugins);

         checkMandatoryPlugins(
-            pluginsList.stream().map(PluginDescriptor::getName).collect(Collectors.toSet()),
+            pluginDescriptors.stream().map(PluginDescriptor::getName).collect(Collectors.toSet()),
             new HashSet<>(MANDATORY_SETTING.get(settings))
         );

         // we don't log jars in lib/ we really shouldn't log modules,
         // but for now: just be transparent so we can debug any potential issues
+        Set<String> moduleNames = new HashSet<>(modulesDescriptors.stream().map(PluginDescriptor::getName).toList());
         for (String name : loadedPlugins.keySet()) {
-            if (moduleNameList.contains(name)) {
+            if (moduleNames.contains(name)) {
                 logger.info("loaded module [{}]", name);
             } else {
                 logger.info("loaded plugin [{}]", name);
@@ -282,23 +214,11 @@ protected List<LoadedPlugin> plugins() {
         return this.plugins;
     }

-    private LinkedHashMap<String, LoadedPlugin> loadBundles(
-        Set<PluginBundle> bundles,
-        Map<String, List<ModuleQualifiedExportsService>> qualifiedExports
-    ) {
-        LinkedHashMap<String, LoadedPlugin> loaded = new LinkedHashMap<>();
-        Map<String, Set<URL>> transitiveUrls = new HashMap<>();
-        List<PluginBundle> sortedBundles = PluginsUtils.sortBundles(bundles);
-        if (sortedBundles.isEmpty() == false) {
-            Set<URL> systemLoaderURLs = JarHell.parseModulesAndClassPath();
-            for (PluginBundle bundle : sortedBundles) {
-                PluginsUtils.checkBundleJarHell(systemLoaderURLs, bundle, transitiveUrls);
-                loadBundle(bundle, loaded, qualifiedExports);
-            }
-        }
-
-        loadExtensions(loaded.values());
-        return loaded;
+    private Map<String, LoadedPlugin> loadPluginBundles(Settings settings, Path configPath, PluginsLoader pluginsLoader) {
+        Map<String, LoadedPlugin> loadedPlugins = new LinkedHashMap<>();
+        pluginsLoader.pluginLayers().forEach(pl -> loadBundle(pl, loadedPlugins, settings, configPath));
+        loadExtensions(loadedPlugins.values());
+        return loadedPlugins;
     }

     // package-private for test visibility
@@ -443,68 +363,43 @@ private static String extensionConstructorMessage(Class extensi
         return "constructor for extension [" + extensionClass.getName() + "] of type [" + extensionPointType.getName() + "]";
     }

-    private void loadBundle(
-        PluginBundle bundle,
-        Map<String, LoadedPlugin> loaded,
-        Map<String, List<ModuleQualifiedExportsService>> qualifiedExports
-    ) {
-        String name = bundle.plugin.getName();
-        logger.debug(() -> "Loading bundle: " + name);
-
-        PluginsUtils.verifyCompatibility(bundle.plugin);
+    private void loadBundle(PluginLayer pluginLayer, Map<String, LoadedPlugin> loadedPlugins, Settings settings, Path configPath) {
+        String name = pluginLayer.pluginBundle().plugin.getName();
+        logger.debug(() -> "Loading plugin bundle: " + name);

-        // collect the list of extended plugins
+        // validate the list of extended plugins
         List<LoadedPlugin> extendedPlugins = new ArrayList<>();
-        for (String extendedPluginName : bundle.plugin.getExtendedPlugins()) {
-            LoadedPlugin extendedPlugin = loaded.get(extendedPluginName);
+        for (String extendedPluginName : pluginLayer.pluginBundle().plugin.getExtendedPlugins()) {
+            LoadedPlugin extendedPlugin = loadedPlugins.get(extendedPluginName);
             assert extendedPlugin != null;
             if (ExtensiblePlugin.class.isInstance(extendedPlugin.instance()) == false) {
                 throw new IllegalStateException("Plugin [" + name + "] cannot extend non-extensible plugin [" + extendedPluginName + "]");
             }
-            assert extendedPlugin.loader() != null : "All non-classpath plugins should be loaded with a classloader";
             extendedPlugins.add(extendedPlugin);
             logger.debug(
-                () -> "Loading bundle: " + name + ", ext plugins: " + extendedPlugins.stream().map(lp -> lp.descriptor().getName()).toList()
+                () -> "Loading plugin bundle: "
+                    + name
+                    + ", ext plugins: "
+                    + extendedPlugins.stream().map(lp -> lp.descriptor().getName()).toList()
             );
         }

-        final ClassLoader parentLoader = ExtendedPluginsClassLoader.create(
-            getClass().getClassLoader(),
-            extendedPlugins.stream().map(LoadedPlugin::loader).toList()
-        );
-        LayerAndLoader spiLayerAndLoader = null;
-        if
(bundle.hasSPI()) { - spiLayerAndLoader = createSPI(bundle, parentLoader, extendedPlugins, qualifiedExports); - } - - final ClassLoader pluginParentLoader = spiLayerAndLoader == null ? parentLoader : spiLayerAndLoader.loader(); - final LayerAndLoader pluginLayerAndLoader = createPlugin( - bundle, - pluginParentLoader, - extendedPlugins, - spiLayerAndLoader, - qualifiedExports - ); - final ClassLoader pluginClassLoader = pluginLayerAndLoader.loader(); - - if (spiLayerAndLoader == null) { - // use full implementation for plugins extending this one - spiLayerAndLoader = pluginLayerAndLoader; - } + PluginBundle pluginBundle = pluginLayer.pluginBundle(); + ClassLoader pluginClassLoader = pluginLayer.pluginClassLoader(); // reload SPI with any new services from the plugin - reloadLuceneSPI(pluginClassLoader); + reloadLuceneSPI(pluginLayer.pluginClassLoader()); ClassLoader cl = Thread.currentThread().getContextClassLoader(); try { // Set context class loader to plugin's class loader so that plugins // that have dependencies with their own SPI endpoints have a chance to load // and initialize them appropriately. - privilegedSetContextClassLoader(pluginClassLoader); + privilegedSetContextClassLoader(pluginLayer.pluginClassLoader()); Plugin plugin; - if (bundle.pluginDescriptor().isStable()) { - stablePluginsRegistry.scanBundleForStablePlugins(bundle, pluginClassLoader); + if (pluginBundle.pluginDescriptor().isStable()) { + stablePluginsRegistry.scanBundleForStablePlugins(pluginBundle, pluginClassLoader); /* Contrary to old plugins we don't need an instance of the plugin here. Stable plugin register components (like CharFilterFactory) in stable plugin registry, which is then used in AnalysisModule @@ -514,16 +409,16 @@ Stable plugin register components (like CharFilterFactory) in stable plugin regi We need to pass a name though so that we can show that a plugin was loaded (via cluster state api) This might need to be revisited once support for settings is added */ - plugin = new StablePluginPlaceHolder(bundle.plugin.getName()); + plugin = new StablePluginPlaceHolder(pluginBundle.plugin.getName()); } else { - Class pluginClass = loadPluginClass(bundle.plugin.getClassname(), pluginClassLoader); + Class pluginClass = loadPluginClass(pluginBundle.plugin.getClassname(), pluginClassLoader); if (pluginClassLoader != pluginClass.getClassLoader()) { throw new IllegalStateException( "Plugin [" + name + "] must reference a class loader local Plugin class [" - + bundle.plugin.getClassname() + + pluginBundle.plugin.getClassname() + "] (class loader [" + pluginClass.getClassLoader() + "])" @@ -531,75 +426,12 @@ We need to pass a name though so that we can show that a plugin was loaded (via } plugin = loadPlugin(pluginClass, settings, configPath); } - loaded.put(name, new LoadedPlugin(bundle.plugin, plugin, spiLayerAndLoader.loader(), spiLayerAndLoader.layer())); + loadedPlugins.put(name, new LoadedPlugin(pluginBundle.plugin, plugin, pluginLayer.pluginClassLoader())); } finally { privilegedSetContextClassLoader(cl); } } - static LayerAndLoader createSPI( - PluginBundle bundle, - ClassLoader parentLoader, - List extendedPlugins, - Map> qualifiedExports - ) { - final PluginDescriptor plugin = bundle.plugin; - if (plugin.getModuleName().isPresent()) { - logger.debug(() -> "Loading bundle: " + plugin.getName() + ", creating spi, modular"); - return createSpiModuleLayer( - bundle.spiUrls, - parentLoader, - extendedPlugins.stream().map(LoadedPlugin::layer).toList(), - qualifiedExports - ); - } else { - logger.debug(() 
-> "Loading bundle: " + plugin.getName() + ", creating spi, non-modular"); - return LayerAndLoader.ofLoader(URLClassLoader.newInstance(bundle.spiUrls.toArray(new URL[0]), parentLoader)); - } - } - - static LayerAndLoader createPlugin( - PluginBundle bundle, - ClassLoader pluginParentLoader, - List extendedPlugins, - LayerAndLoader spiLayerAndLoader, - Map> qualifiedExports - ) { - final PluginDescriptor plugin = bundle.plugin; - if (plugin.getModuleName().isPresent()) { - logger.debug(() -> "Loading bundle: " + plugin.getName() + ", modular"); - var parentLayers = Stream.concat( - Stream.ofNullable(spiLayerAndLoader != null ? spiLayerAndLoader.layer() : null), - extendedPlugins.stream().map(LoadedPlugin::layer) - ).toList(); - return createPluginModuleLayer(bundle, pluginParentLoader, parentLayers, qualifiedExports); - } else if (plugin.isStable()) { - logger.debug(() -> "Loading bundle: " + plugin.getName() + ", non-modular as synthetic module"); - return LayerAndLoader.ofLoader( - UberModuleClassLoader.getInstance( - pluginParentLoader, - ModuleLayer.boot(), - "synthetic." + toModuleName(plugin.getName()), - bundle.allUrls, - Set.of("org.elasticsearch.server") // TODO: instead of denying server, allow only jvm + stable API modules - ) - ); - } else { - logger.debug(() -> "Loading bundle: " + plugin.getName() + ", non-modular"); - return LayerAndLoader.ofLoader(URLClassLoader.newInstance(bundle.urls.toArray(URL[]::new), pluginParentLoader)); - } - } - - // package-visible for testing - static String toModuleName(String name) { - String result = name.replaceAll("\\W+", ".") // replace non-alphanumeric character strings with dots - .replaceAll("(^[^A-Za-z_]*)", "") // trim non-alpha or underscore characters from start - .replaceAll("\\.$", "") // trim trailing dot - .toLowerCase(Locale.getDefault()); - assert ModuleSupport.isPackageName(result); - return result; - } - private static void checkDeprecations( PluginIntrospector inspector, List pluginDescriptors, @@ -706,173 +538,6 @@ public final Stream filterPlugins(Class type) { return plugins().stream().filter(x -> type.isAssignableFrom(x.instance().getClass())).map(p -> ((T) p.instance())); } - static LayerAndLoader createPluginModuleLayer( - PluginBundle bundle, - ClassLoader parentLoader, - List parentLayers, - Map> qualifiedExports - ) { - assert bundle.plugin.getModuleName().isPresent(); - return createModuleLayer( - bundle.plugin.getClassname(), - bundle.plugin.getModuleName().get(), - urlsToPaths(bundle.urls), - parentLoader, - parentLayers, - qualifiedExports - ); - } - - static final LayerAndLoader createSpiModuleLayer( - Set urls, - ClassLoader parentLoader, - List parentLayers, - Map> qualifiedExports - ) { - // assert bundle.plugin.getModuleName().isPresent(); - return createModuleLayer( - null, // no entry point - spiModuleName(urls), - urlsToPaths(urls), - parentLoader, - parentLayers, - qualifiedExports - ); - } - - private static final Module serverModule = PluginsService.class.getModule(); - - static LayerAndLoader createModuleLayer( - String className, - String moduleName, - Path[] paths, - ClassLoader parentLoader, - List parentLayers, - Map> qualifiedExports - ) { - logger.debug(() -> "Loading bundle: creating module layer and loader for module " + moduleName); - var finder = ModuleFinder.of(paths); - - var configuration = Configuration.resolveAndBind( - ModuleFinder.of(), - parentConfigurationOrBoot(parentLayers), - finder, - Set.of(moduleName) - ); - var controller = privilegedDefineModulesWithOneLoader(configuration, 
parentLayersOrBoot(parentLayers), parentLoader); - var pluginModule = controller.layer().findModule(moduleName).get(); - ensureEntryPointAccessible(controller, pluginModule, className); - // export/open upstream modules to this plugin module - exposeQualifiedExportsAndOpens(pluginModule, qualifiedExports); - // configure qualified exports/opens to other modules/plugins - addPluginExportsServices(qualifiedExports, controller); - logger.debug(() -> "Loading bundle: created module layer and loader for module " + moduleName); - return new LayerAndLoader(controller.layer(), privilegedFindLoader(controller.layer(), moduleName)); - } - - private static List parentLayersOrBoot(List parentLayers) { - if (parentLayers == null || parentLayers.isEmpty()) { - return List.of(ModuleLayer.boot()); - } else { - return parentLayers; - } - } - - private static List parentConfigurationOrBoot(List parentLayers) { - if (parentLayers == null || parentLayers.isEmpty()) { - return List.of(ModuleLayer.boot().configuration()); - } else { - return parentLayers.stream().map(ModuleLayer::configuration).toList(); - } - } - - /** Ensures that the plugins main class (its entry point), if any, is accessible to the server. */ - private static void ensureEntryPointAccessible(Controller controller, Module pluginModule, String className) { - if (className != null) { - controller.addOpens(pluginModule, toPackageName(className), serverModule); - } - } - - protected void addServerExportsService(Map> qualifiedExports) { - final Module serverModule = PluginsService.class.getModule(); - var exportsService = new ModuleQualifiedExportsService(serverModule) { - @Override - protected void addExports(String pkg, Module target) { - serverModule.addExports(pkg, target); - } - - @Override - protected void addOpens(String pkg, Module target) { - serverModule.addOpens(pkg, target); - } - }; - addExportsService(qualifiedExports, exportsService, serverModule.getName()); - } - - private static void addPluginExportsServices(Map> qualifiedExports, Controller controller) { - for (Module module : controller.layer().modules()) { - var exportsService = new ModuleQualifiedExportsService(module) { - @Override - protected void addExports(String pkg, Module target) { - controller.addExports(module, pkg, target); - } - - @Override - protected void addOpens(String pkg, Module target) { - controller.addOpens(module, pkg, target); - } - }; - addExportsService(qualifiedExports, exportsService, module.getName()); - } - } - - /** Determines the module name of the SPI module, given its URL. */ - static String spiModuleName(Set spiURLS) { - ModuleFinder finder = ModuleFinder.of(urlsToPaths(spiURLS)); - var mrefs = finder.findAll(); - assert mrefs.size() == 1 : "Expected a single module, got:" + mrefs; - return mrefs.stream().findFirst().get().descriptor().name(); - } - - /** - * Tuple of module layer and loader. - * Modular Plugins have a plugin specific loader and layer. - * Non-Modular plugins have a plugin specific loader and the boot layer. 
- */ - record LayerAndLoader(ModuleLayer layer, ClassLoader loader) { - - LayerAndLoader { - Objects.requireNonNull(layer); - Objects.requireNonNull(loader); - } - - static LayerAndLoader ofLoader(ClassLoader loader) { - return new LayerAndLoader(ModuleLayer.boot(), loader); - } - } - - @SuppressForbidden(reason = "I need to convert URL's to Paths") - static final Path[] urlsToPaths(Set urls) { - return urls.stream().map(PluginsService::uncheckedToURI).map(PathUtils::get).toArray(Path[]::new); - } - - static final URI uncheckedToURI(URL url) { - try { - return url.toURI(); - } catch (URISyntaxException e) { - throw new AssertionError(new IOException(e)); - } - } - - static final String toPackageName(String className) { - assert className.endsWith(".") == false; - int index = className.lastIndexOf('.'); - if (index == -1) { - throw new IllegalStateException("invalid class name:" + className); - } - return className.substring(0, index); - } - @SuppressWarnings("removal") private static void privilegedSetContextClassLoader(ClassLoader loader) { AccessController.doPrivileged((PrivilegedAction) () -> { @@ -880,16 +545,4 @@ private static void privilegedSetContextClassLoader(ClassLoader loader) { return null; }); } - - @SuppressWarnings("removal") - static Controller privilegedDefineModulesWithOneLoader(Configuration cf, List parentLayers, ClassLoader parentLoader) { - return AccessController.doPrivileged( - (PrivilegedAction) () -> ModuleLayer.defineModulesWithOneLoader(cf, parentLayers, parentLoader) - ); - } - - @SuppressWarnings("removal") - static ClassLoader privilegedFindLoader(ModuleLayer layer, String name) { - return AccessController.doPrivileged((PrivilegedAction) () -> layer.findLoader(name)); - } } diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java b/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java index 44fb531f8610e..155cff57a0ebf 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java @@ -210,12 +210,12 @@ public static void checkForFailedPluginRemovals(final Path pluginsDirectory) thr } /** Get bundles for plugins installed in the given modules directory. */ - static Set getModuleBundles(Path modulesDirectory) throws IOException { + public static Set getModuleBundles(Path modulesDirectory) throws IOException { return findBundles(modulesDirectory, "module"); } /** Get bundles for plugins installed in the given plugins directory. 
 */
-    static Set<PluginBundle> getPluginBundles(final Path pluginsDirectory) throws IOException {
+    public static Set<PluginBundle> getPluginBundles(final Path pluginsDirectory) throws IOException {
         return findBundles(pluginsDirectory, "plugin");
     }

diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java
index 2b95fbc69199f..1c4b7cfdab4ef 100644
--- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java
+++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java
@@ -283,12 +283,22 @@ public RegisterRepositoryTask(final RepositoriesService repositoriesService, fin
         @Override
         public ClusterState execute(ClusterState currentState) {
-            RepositoryMetadata newRepositoryMetadata = new RepositoryMetadata(request.name(), request.type(), request.settings());
             Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata());
             RepositoriesMetadata repositories = RepositoriesMetadata.get(currentState);
             List<RepositoryMetadata> repositoriesMetadata = new ArrayList<>(repositories.repositories().size() + 1);
             for (RepositoryMetadata repositoryMetadata : repositories.repositories()) {
-                if (repositoryMetadata.name().equals(newRepositoryMetadata.name())) {
+                if (repositoryMetadata.name().equals(request.name())) {
+                    final RepositoryMetadata newRepositoryMetadata = new RepositoryMetadata(
+                        request.name(),
+                        // Copy the UUID from the existing instance rather than resetting it back to MISSING_UUID which would force us to
+                        // re-read the RepositoryData to get it again. In principle the new RepositoryMetadata might point to a different
+                        // underlying repository at this point, but if so that'll cause things to fail in clear ways and eventually (before
+                        // writing anything) we'll read the RepositoryData again and update the UUID in the RepositoryMetadata to match. See
+                        // also #109936.
+                        repositoryMetadata.uuid(),
+                        request.type(),
+                        request.settings()
+                    );
                     Repository existing = repositoriesService.repositories.get(request.name());
                     if (existing == null) {
                         existing = repositoriesService.internalRepositories.get(request.name());
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
index b43fe05a541f6..f1c3d82b74cab 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -191,6 +191,11 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent implements Repository {

     private static final Logger logger = LogManager.getLogger(BlobStoreRepository.class);

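The point of the nested ShutdownLogger just below is that the logger name is derived from the nested class, so its level can be raised or lowered independently of the enclosing BlobStoreRepository logger; a minimal sketch of the pattern (hypothetical Outer class, not this change's code):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    class Outer {
        private static final Logger logger = LogManager.getLogger(Outer.class);

        // Distinct logger name derived from the nested class, so logging config
        // can turn its level up or down without touching Outer's logger.
        private static class ShutdownLogger {
            private static final Logger shutdownLogger = LogManager.getLogger(ShutdownLogger.class);
        }

        static void demo() {
            logger.info("normal-path message");                     // governed by Outer's level
            ShutdownLogger.shutdownLogger.debug("shutdown detail"); // governed by ShutdownLogger's level
        }
    }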
+    private static class ShutdownLogger {
+        // Creating a separate logger so that the log-level can be manipulated separately from the parent class.
+        private static final Logger shutdownLogger = LogManager.getLogger(ShutdownLogger.class);
+    }
+
     protected volatile RepositoryMetadata metadata;

     protected final ThreadPool threadPool;
@@ -200,6 +205,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
     public static final String STATELESS_SHARD_WRITE_THREAD_NAME = "stateless_shard_write";
     public static final String STATELESS_CLUSTER_STATE_READ_WRITE_THREAD_NAME = "stateless_cluster_state";
     public static final String STATELESS_SHARD_PREWARMING_THREAD_NAME = "stateless_prewarm";
+    public static final String SEARCHABLE_SNAPSHOTS_CACHE_FETCH_ASYNC_THREAD_NAME = "searchable_snapshots_cache_fetch_async";
+    public static final String SEARCHABLE_SNAPSHOTS_CACHE_PREWARMING_THREAD_NAME = "searchable_snapshots_cache_prewarming";

     /**
      * Prefix for the name of the root {@link RepositoryData} blob.
@@ -2183,7 +2190,9 @@ private void assertSnapshotOrStatelessPermittedThreadPool() {
             STATELESS_TRANSLOG_THREAD_NAME,
             STATELESS_SHARD_WRITE_THREAD_NAME,
             STATELESS_CLUSTER_STATE_READ_WRITE_THREAD_NAME,
-            STATELESS_SHARD_PREWARMING_THREAD_NAME
+            STATELESS_SHARD_PREWARMING_THREAD_NAME,
+            SEARCHABLE_SNAPSHOTS_CACHE_FETCH_ASYNC_THREAD_NAME,
+            SEARCHABLE_SNAPSHOTS_CACHE_PREWARMING_THREAD_NAME
         );
     }

@@ -3467,10 +3476,37 @@ private void doSnapshotShard(SnapshotShardContext context) {
     }

     private static void ensureNotAborted(ShardId shardId, SnapshotId snapshotId, IndexShardSnapshotStatus snapshotStatus, String fileName) {
+        var shardSnapshotStage = snapshotStatus.getStage();
         try {
-            snapshotStatus.ensureNotAborted();
+            IndexShardSnapshotStatus.ensureNotAborted(shardSnapshotStage);
+
+            if (shardSnapshotStage != IndexShardSnapshotStatus.Stage.INIT && shardSnapshotStage != IndexShardSnapshotStatus.Stage.STARTED) {
+                // A normally running shard snapshot should be in stage INIT or STARTED. And we know it's not in PAUSING or ABORTED because
+                // the ensureNotAborted() call above did not throw. The remaining options don't make sense, if they ever happen.
+                logger.error(
+                    () -> Strings.format(
+                        "Shard snapshot found an unexpected state. ShardId [%s], SnapshotID [%s], Stage [%s]",
+                        shardId,
+                        snapshotId,
+                        shardSnapshotStage
+                    )
+                );
+                assert false;
+            }
         } catch (Exception e) {
-            logger.debug("[{}] [{}] {} on the file [{}], exiting", shardId, snapshotId, e.getMessage(), fileName);
+            // We want to see when a shard snapshot operation checks for and finds an interrupt signal during shutdown. A
+            // PausedSnapshotException indicates we're in shutdown because that's the only case when shard snapshots are signaled to pause.
+            // An AbortedSnapshotException may also occur during shutdown if an uncommon error occurs.
+            ShutdownLogger.shutdownLogger.debug(
+                () -> Strings.format(
+                    "Shard snapshot operation is aborting. 
ShardId [%s], SnapshotID [%s], File [%s], Stage [%s]", + shardId, + snapshotId, + fileName, + shardSnapshotStage + ), + e + ); assert e instanceof AbortedSnapshotException || e instanceof PausedSnapshotException : e; throw e; } diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java index ae9ae6f8b5bf9..5f907572641a6 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java @@ -22,14 +22,27 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.file.MasterNodeFileWatchingService; import org.elasticsearch.env.Environment; +import org.elasticsearch.health.HealthIndicatorDetails; +import org.elasticsearch.health.HealthIndicatorImpact; +import org.elasticsearch.health.HealthIndicatorResult; +import org.elasticsearch.health.HealthIndicatorService; +import org.elasticsearch.health.SimpleHealthIndicatorDetails; +import org.elasticsearch.health.node.HealthInfo; import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.io.BufferedInputStream; import java.io.IOException; import java.nio.file.Files; +import java.util.List; +import java.util.Map; import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import static org.elasticsearch.health.HealthStatus.GREEN; +import static org.elasticsearch.health.HealthStatus.YELLOW; +import static org.elasticsearch.health.ImpactArea.DEPLOYMENT_MANAGEMENT; import static org.elasticsearch.reservedstate.service.ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION; import static org.elasticsearch.reservedstate.service.ReservedStateVersionCheck.HIGHER_VERSION_ONLY; import static org.elasticsearch.xcontent.XContentType.JSON; @@ -53,6 +66,7 @@ public class FileSettingsService extends MasterNodeFileWatchingService implement public static final String NAMESPACE = "file_settings"; public static final String OPERATOR_DIRECTORY = "operator"; private final ReservedClusterStateService stateService; + private final FileSettingsHealthIndicatorService healthIndicatorService; /** * Constructs the {@link FileSettingsService} @@ -60,10 +74,21 @@ public class FileSettingsService extends MasterNodeFileWatchingService implement * @param clusterService so we can register ourselves as a cluster state change listener * @param stateService an instance of the immutable cluster state controller, so we can perform the cluster state changes * @param environment we need the environment to pull the location of the config and operator directories + * @param healthIndicatorService tracks the success or failure of file-based settings */ - public FileSettingsService(ClusterService clusterService, ReservedClusterStateService stateService, Environment environment) { + public FileSettingsService( + ClusterService clusterService, + ReservedClusterStateService stateService, + Environment environment, + FileSettingsHealthIndicatorService healthIndicatorService + ) { super(clusterService, environment.configFile().toAbsolutePath().resolve(OPERATOR_DIRECTORY).resolve(SETTINGS_FILE_NAME)); this.stateService = stateService; + this.healthIndicatorService = healthIndicatorService; + } + + public FileSettingsHealthIndicatorService healthIndicatorService() { 
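To make the health semantics concrete before the FileSettingsHealthIndicatorService appears below: the indicator reports GREEN until the first file change is observed, returns to GREEN on any successful apply, and stays YELLOW while the failure streak is non-zero. A condensed, standalone sketch of that state machine (hypothetical class; the real one implements HealthIndicatorService):

    import java.util.concurrent.atomic.AtomicLong;
    import java.util.concurrent.atomic.AtomicReference;

    class FileSettingsHealthSketch {
        private final AtomicLong changeCount = new AtomicLong();
        private final AtomicLong failureStreak = new AtomicLong();
        private final AtomicReference<String> mostRecentFailure = new AtomicReference<>();

        void changeOccurred() { changeCount.incrementAndGet(); } // a settings file change was seen
        void successOccurred() { failureStreak.set(0); }         // the latest apply succeeded
        void failureOccurred(String description) {               // the latest apply failed
            failureStreak.incrementAndGet();
            mostRecentFailure.set(description);
        }

        String status() {
            if (changeCount.get() == 0) return "GREEN: no changes yet";
            return failureStreak.get() == 0 ? "GREEN" : "YELLOW: " + mostRecentFailure.get();
        }
    }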
+        return healthIndicatorService;
+    }

     /**
@@ -121,6 +146,7 @@ protected boolean shouldRefreshFileState(ClusterState clusterState) {
     @Override
     protected void processFileChanges() throws ExecutionException, InterruptedException, IOException {
         logger.info("processing path [{}] for [{}]", watchedFile(), NAMESPACE);
+        healthIndicatorService.changeOccurred();
         processFileChanges(HIGHER_VERSION_ONLY);
     }

@@ -131,6 +157,7 @@ protected void processFileChanges() throws ExecutionException, InterruptedExcept
     @Override
     protected void processFileOnServiceStart() throws IOException, ExecutionException, InterruptedException {
         logger.info("processing path [{}] for [{}] on service start", watchedFile(), NAMESPACE);
+        healthIndicatorService.changeOccurred();
         processFileChanges(HIGHER_OR_SAME_VERSION);
     }

@@ -146,6 +173,16 @@ private void processFileChanges(ReservedStateVersionCheck versionCheck) throws I
         completion.get();
     }

+    private void completeProcessing(Exception e, PlainActionFuture<Void> completion) {
+        if (e != null) {
+            healthIndicatorService.failureOccurred(e.toString());
+            completion.onFailure(e);
+        } else {
+            completion.onResponse(null);
+            healthIndicatorService.successOccurred();
+        }
+    }
+
     @Override
     protected void onProcessFileChangesException(Exception e) {
         if (e instanceof ExecutionException) {
@@ -172,11 +209,61 @@ protected void processInitialFileMissing() throws ExecutionException, Interrupte
         completion.get();
     }

-    private static void completeProcessing(Exception e, PlainActionFuture<Void> completion) {
-        if (e != null) {
-            completion.onFailure(e);
-        } else {
-            completion.onResponse(null);
+    public static class FileSettingsHealthIndicatorService implements HealthIndicatorService {
+        static final String NAME = "file_settings";
+        static final String NO_CHANGES_SYMPTOM = "No file-based setting changes have occurred";
+        static final String SUCCESS_SYMPTOM = "The most recent file-based settings were applied successfully";
+        static final String FAILURE_SYMPTOM = "The most recent file-based settings encountered an error";
+
+        static final List<HealthIndicatorImpact> STALE_SETTINGS_IMPACT = List.of(
+            new HealthIndicatorImpact(
+                NAME,
+                "stale",
+                3,
+                "The most recent file-based settings changes have not been applied.",
+                List.of(DEPLOYMENT_MANAGEMENT)
+            )
+        );
+
+        private final AtomicLong changeCount = new AtomicLong(0);
+        private final AtomicLong failureStreak = new AtomicLong(0);
+        private final AtomicReference<String> mostRecentFailure = new AtomicReference<>();
+
+        public void changeOccurred() {
+            changeCount.incrementAndGet();
+        }
+
+        public void successOccurred() {
+            failureStreak.set(0);
+        }
+
+        public void failureOccurred(String description) {
+            failureStreak.incrementAndGet();
+            mostRecentFailure.set(description);
+        }
+
+        @Override
+        public String name() {
+            return NAME;
+        }
+
+        @Override
+        public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) {
+            if (0 == changeCount.get()) {
+                return createIndicator(GREEN, NO_CHANGES_SYMPTOM, HealthIndicatorDetails.EMPTY, List.of(), List.of());
+            }
+            long numFailures = failureStreak.get();
+            if (0 == numFailures) {
+                return createIndicator(GREEN, SUCCESS_SYMPTOM, HealthIndicatorDetails.EMPTY, List.of(), List.of());
+            } else {
+                return createIndicator(
+                    YELLOW,
+                    FAILURE_SYMPTOM,
+                    new SimpleHealthIndicatorDetails(Map.of("failure_streak", numFailures, "most_recent_failure", mostRecentFailure.get())),
+                    STALE_SETTINGS_IMPACT,
+                    List.of()
+                );
+            }
+        }
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java
b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index f1b59ed14cefb..4564a37dacf4a 100644 --- a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -122,6 +122,7 @@ public final void handleRequest(RestRequest request, RestChannel channel, NodeCl ); } + usageCount.increment(); if (request.isStreamedContent()) { assert action instanceof RequestBodyChunkConsumer; var chunkConsumer = (RequestBodyChunkConsumer) action; @@ -137,11 +138,11 @@ public void close() { chunkConsumer.streamClose(); } }); + action.accept(channel); + } else { + action.accept(channel); + request.getHttpRequest().release(); } - - usageCount.increment(); - // execute the action - action.accept(channel); } } diff --git a/server/src/main/java/org/elasticsearch/rest/FilterRestHandler.java b/server/src/main/java/org/elasticsearch/rest/FilterRestHandler.java index cb5155cb0de0b..21a44ac9af5c8 100644 --- a/server/src/main/java/org/elasticsearch/rest/FilterRestHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/FilterRestHandler.java @@ -43,11 +43,6 @@ public boolean canTripCircuitBreaker() { return delegate.canTripCircuitBreaker(); } - @Override - public boolean allowsUnsafeBuffers() { - return delegate.allowsUnsafeBuffers(); - } - @Override public boolean supportsBulkContent() { return delegate.supportsBulkContent(); diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index 7446ec5bb6717..49fe794bbe615 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -432,10 +432,6 @@ private void dispatchRequest( } // iff we could reserve bytes for the request we need to send the response also over this channel responseChannel = new ResourceHandlingHttpChannel(channel, circuitBreakerService, contentLength, methodHandlers); - // TODO: Count requests double in the circuit breaker if they need copying? 
- if (handler.allowsUnsafeBuffers() == false) { - request.ensureSafeBuffers(); - } if (handler.allowSystemIndexAccessByDefault() == false) { // The ELASTIC_PRODUCT_ORIGIN_HTTP_HEADER indicates that the request is coming from an Elastic product and diff --git a/server/src/main/java/org/elasticsearch/rest/RestFeatures.java b/server/src/main/java/org/elasticsearch/rest/RestFeatures.java index 8d546f7aa43f8..e72b30526c8e3 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestFeatures.java +++ b/server/src/main/java/org/elasticsearch/rest/RestFeatures.java @@ -9,13 +9,10 @@ package org.elasticsearch.rest; -import org.elasticsearch.Version; import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; -import org.elasticsearch.rest.action.admin.cluster.RestClusterGetSettingsAction; import org.elasticsearch.rest.action.admin.cluster.RestNodesCapabilitiesAction; -import java.util.Map; import java.util.Set; import static org.elasticsearch.search.fetch.subphase.highlight.DefaultHighlighter.UNIFIED_HIGHLIGHTER_MATCHED_FIELDS; @@ -29,9 +26,4 @@ public Set getFeatures() { UNIFIED_HIGHLIGHTER_MATCHED_FIELDS ); } - - @Override - public Map getHistoricalFeatures() { - return Map.of(RestClusterGetSettingsAction.SUPPORTS_GET_SETTINGS_ACTION, Version.V_8_3_0); - } } diff --git a/server/src/main/java/org/elasticsearch/rest/RestHandler.java b/server/src/main/java/org/elasticsearch/rest/RestHandler.java index cf66e402d3691..572e92e369a63 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/RestHandler.java @@ -69,18 +69,6 @@ default Scope getServerlessScope() { return serverlessScope == null ? null : serverlessScope.value(); } - /** - * Indicates if the RestHandler supports working with pooled buffers. If the request handler will not escape the return - * {@link RestRequest#content()} or any buffers extracted from it then there is no need to make a copies of any pooled buffers in the - * {@link RestRequest} instance before passing a request to this handler. If this instance does not support pooled/unsafe buffers - * {@link RestRequest#ensureSafeBuffers()} should be called on any request before passing it to {@link #handleRequest}. - * - * @return true iff the handler supports requests that make use of pooled buffers - */ - default boolean allowsUnsafeBuffers() { - return false; - } - /** * The list of {@link Route}s that this RestHandler is responsible for handling. 
*/ diff --git a/server/src/main/java/org/elasticsearch/rest/RestRequest.java b/server/src/main/java/org/elasticsearch/rest/RestRequest.java index 17eda305b5ccf..a04bdcb32f2b4 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestRequest.java +++ b/server/src/main/java/org/elasticsearch/rest/RestRequest.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Booleans; @@ -27,6 +28,8 @@ import org.elasticsearch.http.HttpBody; import org.elasticsearch.http.HttpChannel; import org.elasticsearch.http.HttpRequest; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.telemetry.tracing.Traceable; import org.elasticsearch.xcontent.ParsedMediaType; import org.elasticsearch.xcontent.ToXContent; @@ -51,6 +54,8 @@ public class RestRequest implements ToXContent.Params, Traceable { + private static final Logger logger = LogManager.getLogger(RestRequest.class); + /** * Internal marker request parameter to indicate that a request was made in serverless mode. Use this parameter, together with * {@link #OPERATOR_REQUEST} if you need to toggle behavior for serverless, for example to enforce partial API restrictions @@ -187,15 +192,6 @@ protected RestRequest(RestRequest other) { } } - /** - * Invoke {@link HttpRequest#releaseAndCopy()} on the http request in this instance and replace a pooled http request - * with an unpooled copy. This is supposed to be used before passing requests to {@link RestHandler} instances that can not safely - * handle http requests that use pooled buffers as determined by {@link RestHandler#allowsUnsafeBuffers()}. - */ - void ensureSafeBuffers() { - httpRequest = httpRequest.releaseAndCopy(); - } - /** * Creates a new REST request. * @@ -306,9 +302,22 @@ public boolean isFullContent() { return httpRequest.body().isFull(); } - public BytesReference content() { + /** + * Returns a direct reference to the network buffer containing the request body. The HTTP layers will release their references to this + * buffer as soon as they have finished the synchronous steps of processing the request on the network thread, which will by default + * release the buffer back to the pool where it may be re-used for another request. If you need to keep the buffer alive past the end of + * these synchronous steps, acquire your own reference to this buffer and release it once it's no longer needed. + */ + public ReleasableBytesReference content() { this.contentConsumed = true; - return httpRequest.body().asFull().bytes(); + var bytes = httpRequest.body().asFull().bytes(); + if (bytes.hasReferences() == false) { + var e = new IllegalStateException("http releasable content accessed after release"); + logger.error(e.getMessage(), e); + assert false : e; + throw e; + } + return bytes; } public boolean isStreamedContent() { @@ -320,9 +329,10 @@ public HttpBody.Stream contentStream() { } /** - * @return content of the request body or throw an exception if the body or content type is missing + * Returns reference to the network buffer of HTTP content or throw an exception if the body or content type is missing. + * See {@link #content()}. 
 */
-    public final BytesReference requiredContent() {
+    public ReleasableBytesReference requiredContent() {
         if (hasContent() == false) {
             throw new ElasticsearchParseException("request body is required");
         } else if (xContentType.get() == null) {
@@ -563,7 +573,7 @@ public final boolean hasContentOrSourceParam() {
      * if you need to handle the absence request content gracefully.
      */
     public final XContentParser contentOrSourceParamParser() throws IOException {
-        Tuple<XContentType, BytesReference> tuple = contentOrSourceParam();
+        Tuple<XContentType, ReleasableBytesReference> tuple = contentOrSourceParam();
         return XContentHelper.createParserNotCompressed(parserConfig, tuple.v2(), tuple.v1().xContent().type());
     }

@@ -574,7 +584,7 @@ public final XContentParser contentOrSourceParamParser() throws IOException {
      */
     public final void withContentOrSourceParamParserOrNull(CheckedConsumer<XContentParser, IOException> withParser) throws IOException {
         if (hasContentOrSourceParam()) {
-            Tuple<XContentType, BytesReference> tuple = contentOrSourceParam();
+            Tuple<XContentType, ReleasableBytesReference> tuple = contentOrSourceParam();
             try (XContentParser parser = XContentHelper.createParserNotCompressed(parserConfig, tuple.v2(), tuple.v1())) {
                 withParser.accept(parser);
             }
@@ -587,7 +597,7 @@ public final void withContentOrSourceParamParserOrNull(CheckedConsumer<XContent
      */
-    public final Tuple<XContentType, BytesReference> contentOrSourceParam() {
+    public final Tuple<XContentType, ReleasableBytesReference> contentOrSourceParam() {
         if (hasContentOrSourceParam() == false) {
             throw new ElasticsearchParseException("request body or source parameter is required");
         } else if (hasContent()) {
@@ -603,7 +613,7 @@ public final Tuple<XContentType, BytesReference> contentOrSourceParam() {
         if (xContentType == null) {
             throwValidationException("Unknown value for source_content_type [" + typeParam + "]");
         }
-        return new Tuple<>(xContentType, bytes);
+        return new Tuple<>(xContentType, ReleasableBytesReference.wrap(bytes));
     }

     public ParsedMediaType getParsedAccept() {
diff --git a/server/src/main/java/org/elasticsearch/rest/RestRequestFilter.java b/server/src/main/java/org/elasticsearch/rest/RestRequestFilter.java
index e4105363e1bce..7c90d9168e6c8 100644
--- a/server/src/main/java/org/elasticsearch/rest/RestRequestFilter.java
+++ b/server/src/main/java/org/elasticsearch/rest/RestRequestFilter.java
@@ -12,6 +12,7 @@
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.ReleasableBytesReference;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
 import org.elasticsearch.core.Tuple;
@@ -44,7 +45,7 @@ public boolean hasContent() {
                 }

                 @Override
-                public BytesReference content() {
+                public ReleasableBytesReference content() {
                     if (filteredBytes == null) {
                         Tuple<XContentType, Map<String, Object>> result = XContentHelper.convertToMap(
                             restRequest.requiredContent(),
@@ -63,7 +64,7 @@ public BytesReference content() {
                             throw new ElasticsearchException("failed to parse request", e);
                         }
                     }
-                    return filteredBytes;
+                    return ReleasableBytesReference.wrap(filteredBytes);
                 }
             };
         } else {
diff --git a/server/src/main/java/org/elasticsearch/rest/RestResponse.java b/server/src/main/java/org/elasticsearch/rest/RestResponse.java
index 29cae343fb09e..d043974055667 100644
--- a/server/src/main/java/org/elasticsearch/rest/RestResponse.java
+++ b/server/src/main/java/org/elasticsearch/rest/RestResponse.java
@@ -22,6 +22,7 @@
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.Releasable;
 import org.elasticsearch.core.Releasables;
+import org.elasticsearch.core.RestApiVersion;
 import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContentBuilder;
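Stepping back, the REST handler changes below all follow one ref-counting discipline for the pooled request body: take an extra reference before going async, then release it exactly once when the listener completes. A condensed sketch of that discipline (AsyncClient is a hypothetical stand-in; mustIncRef and ActionListener.releaseAfter are the same calls used in this diff):

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.common.bytes.ReleasableBytesReference;

    class BodyRetainSketch {
        interface AsyncClient<R> { // stand-in for NodeClient-style async execution
            void execute(ActionListener<R> listener);
        }

        static <R> void dispatch(ReleasableBytesReference content, ActionListener<R> listener, AsyncClient<R> client) {
            content.mustIncRef();                                            // keep the buffer alive past the network thread
            client.execute(ActionListener.releaseAfter(listener, content)); // releases the reference on response or failure
        }
    }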
@@ -146,12 +147,12 @@ public RestResponse(RestChannel channel, RestStatus status, Exception e) throws params = new ToXContent.DelegatingMapParams(singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false"), params); } - if (channel.detailedErrorsEnabled() == false) { + if (channel.request().getRestApiVersion() == RestApiVersion.V_8 && channel.detailedErrorsEnabled() == false) { deprecationLogger.warn( DeprecationCategory.API, "http_detailed_errors", - "The JSON format of non-detailed errors will change in Elasticsearch 9.0 to match the JSON structure" - + " used for detailed errors. To keep using the existing format, use the V8 REST API." + "The JSON format of non-detailed errors has changed in Elasticsearch 9.0 to match the JSON structure" + + " used for detailed errors." ); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java index 946931e166363..ca9e4abcaeec7 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java @@ -11,13 +11,11 @@ import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsAction; import org.elasticsearch.action.admin.cluster.settings.RestClusterGetSettingsResponse; -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; @@ -27,7 +25,6 @@ import java.io.IOException; import java.util.List; import java.util.Set; -import java.util.function.Predicate; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @@ -35,23 +32,14 @@ @ServerlessScope(Scope.INTERNAL) public class RestClusterGetSettingsAction extends BaseRestHandler { - public static final NodeFeature SUPPORTS_GET_SETTINGS_ACTION = new NodeFeature("rest.get_settings_action"); - private final Settings settings; private final ClusterSettings clusterSettings; private final SettingsFilter settingsFilter; - private final Predicate clusterSupportsFeature; - public RestClusterGetSettingsAction( - Settings settings, - ClusterSettings clusterSettings, - SettingsFilter settingsFilter, - Predicate clusterSupportsFeature - ) { + public RestClusterGetSettingsAction(Settings settings, ClusterSettings clusterSettings, SettingsFilter settingsFilter) { this.settings = settings; this.clusterSettings = clusterSettings; this.settingsFilter = settingsFilter; - this.clusterSupportsFeature = clusterSupportsFeature; } @Override @@ -72,10 +60,6 @@ private static void setUpRequestParams(MasterNodeReadRequest clusterRequest, public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final boolean renderDefaults = request.paramAsBoolean("include_defaults", false); - if (clusterSupportsFeature.test(SUPPORTS_GET_SETTINGS_ACTION) == false) { - return prepareLegacyRequest(request, client, renderDefaults); - } - 
ClusterGetSettingsAction.Request clusterSettingsRequest = new ClusterGetSettingsAction.Request(getMasterNodeTimeout(request)); setUpRequestParams(clusterSettingsRequest, request); @@ -89,29 +73,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC ); } - private RestChannelConsumer prepareLegacyRequest(final RestRequest request, final NodeClient client, final boolean renderDefaults) { - ClusterStateRequest clusterStateRequest = new ClusterStateRequest(getMasterNodeTimeout(request)).routingTable(false).nodes(false); - setUpRequestParams(clusterStateRequest, request); - return channel -> client.admin() - .cluster() - .state( - clusterStateRequest, - new RestToXContentListener(channel).map( - r -> response( - new ClusterGetSettingsAction.Response( - r.getState().metadata().persistentSettings(), - r.getState().metadata().transientSettings(), - r.getState().metadata().settings() - ), - renderDefaults, - settingsFilter, - clusterSettings, - settings - ) - ) - ); - } - @Override protected Set responseParams() { return Settings.FORMAT_PARAMS; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java index 4451117fa4792..a698dc3f30577 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java @@ -8,6 +8,7 @@ */ package org.elasticsearch.rest.action.admin.cluster; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction; import org.elasticsearch.client.internal.node.NodeClient; @@ -57,6 +58,10 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client request.getXContentType(), StoredScriptSource.parse(content, xContentType) ); - return channel -> client.execute(TransportPutStoredScriptAction.TYPE, putRequest, new RestToXContentListener<>(channel)); + return channel -> client.execute( + TransportPutStoredScriptAction.TYPE, + putRequest, + ActionListener.withRef(new RestToXContentListener<>(channel), content) + ); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java index de3fd390ec86d..dea7b7138d0d0 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java @@ -10,6 +10,7 @@ package org.elasticsearch.rest.action.document; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkRequestParser; @@ -102,9 +103,11 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC boolean defaultRequireDataStream = request.paramAsBoolean(DocWriteRequest.REQUIRE_DATA_STREAM, false); bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT)); bulkRequest.setRefreshPolicy(request.param("refresh")); + ReleasableBytesReference content = request.requiredContent(); + try { bulkRequest.add( - request.requiredContent(), + content, 
defaultIndex, defaultRouting, defaultFetchSourceContext, @@ -119,8 +122,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC } catch (Exception e) { return channel -> new RestToXContentListener<>(channel).onFailure(parseFailureException(e)); } - - return channel -> client.bulk(bulkRequest, new RestRefCountedChunkedToXContentListener<>(channel)); + return channel -> { + content.mustIncRef(); + client.bulk(bulkRequest, ActionListener.releaseAfter(new RestRefCountedChunkedToXContentListener<>(channel), content)); + }; } else { String waitForActiveShards = request.param("wait_for_active_shards"); TimeValue timeout = request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT); @@ -270,11 +275,6 @@ public boolean supportsBulkContent() { return true; } - @Override - public boolean allowsUnsafeBuffers() { - return true; - } - @Override public Set supportedCapabilities() { return capabilities; diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java index c2437dcb96fa6..d40c6225cc7b4 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java @@ -9,12 +9,14 @@ package org.elasticsearch.rest.action.document; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.index.VersionType; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -104,11 +106,12 @@ public RestChannelConsumer prepareRequest(RestRequest request, final NodeClient @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + ReleasableBytesReference source = request.requiredContent(); IndexRequest indexRequest = new IndexRequest(request.param("index")); indexRequest.id(request.param("id")); indexRequest.routing(request.param("routing")); indexRequest.setPipeline(request.param("pipeline")); - indexRequest.source(request.requiredContent(), request.getXContentType()); + indexRequest.source(source, request.getXContentType()); indexRequest.timeout(request.paramAsTime("timeout", IndexRequest.DEFAULT_TIMEOUT)); indexRequest.setRefreshPolicy(request.param("refresh")); indexRequest.version(RestActions.parseVersion(request)); @@ -126,10 +129,16 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC indexRequest.opType(sOpType); } - return channel -> client.index( - indexRequest, - new RestToXContentListener<>(channel, DocWriteResponse::status, r -> r.getLocation(indexRequest.routing())) - ); + return channel -> { + source.mustIncRef(); + client.index( + indexRequest, + ActionListener.releaseAfter( + new RestToXContentListener<>(channel, DocWriteResponse::status, r -> r.getLocation(indexRequest.routing())), + source + ) + ); + }; } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java index 
diff --git a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java
index 269d9b08ab66b..c6b3daa38d663 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java
@@ -9,10 +9,11 @@
 
 package org.elasticsearch.rest.action.ingest;
 
+import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ingest.PutPipelineRequest;
 import org.elasticsearch.action.ingest.PutPipelineTransportAction;
 import org.elasticsearch.client.internal.node.NodeClient;
-import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.ReleasableBytesReference;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestRequest;
@@ -56,15 +57,20 @@ public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient cl
             }
         }
 
-        Tuple<XContentType, BytesReference> sourceTuple = restRequest.contentOrSourceParam();
+        Tuple<XContentType, ReleasableBytesReference> sourceTuple = restRequest.contentOrSourceParam();
+        var content = sourceTuple.v2();
         final var request = new PutPipelineRequest(
             getMasterNodeTimeout(restRequest),
             getAckTimeout(restRequest),
             restRequest.param("id"),
-            sourceTuple.v2(),
+            content,
             sourceTuple.v1(),
             ifVersion
         );
-        return channel -> client.execute(PutPipelineTransportAction.TYPE, request, new RestToXContentListener<>(channel));
+        return channel -> client.execute(
+            PutPipelineTransportAction.TYPE,
+            request,
+            ActionListener.withRef(new RestToXContentListener<>(channel), content)
+        );
    }
 }
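ActionListener.withRef, as used here and in RestPutStoredScriptAction above, appears to tie the lifetime of the ref-counted content to the listener: the resource is released when the listener completes, with no explicit mustIncRef at the call site. A rough analogue under that assumption (the withRef-like helper below is hypothetical, not the ES signature):

    import java.util.function.Consumer;

    public class WithRefSketch {
        // Hypothetical analogue: run the delegate, then release the resource, on success or failure.
        static <T> Consumer<T> withRef(Consumer<T> delegate, AutoCloseable ref) {
            return response -> {
                try {
                    delegate.accept(response);
                } finally {
                    try { ref.close(); } catch (Exception e) { throw new RuntimeException(e); }
                }
            };
        }

        public static void main(String[] args) {
            AutoCloseable pooledContent = () -> System.out.println("content released");
            Consumer<String> listener = withRef(r -> System.out.println("ack: " + r), pooledContent);
            listener.accept("true"); // prints the ack, then releases the body
        }
    }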
diff --git a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestAction.java b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestAction.java
index c825a8198e6e4..978b6d1c3a92d 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulateIngestAction.java
@@ -16,6 +16,7 @@
 import org.elasticsearch.action.bulk.SimulateBulkRequest;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.ReleasableBytesReference;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.ingest.ConfigurationUtils;
@@ -72,7 +73,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
         String defaultIndex = request.param("index");
         FetchSourceContext defaultFetchSourceContext = FetchSourceContext.parseFromRestRequest(request);
         String defaultPipeline = request.param("pipeline");
-        Tuple<XContentType, BytesReference> sourceTuple = request.contentOrSourceParam();
+        Tuple<XContentType, ReleasableBytesReference> sourceTuple = request.contentOrSourceParam();
         Map<String, Object> sourceMap = XContentHelper.convertToMap(sourceTuple.v2(), false, sourceTuple.v1()).v2();
         Map<String, Map<String, Object>> pipelineSubstitutions = (Map<String, Map<String, Object>>) sourceMap.remove(
             "pipeline_substitutions"
diff --git a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulatePipelineAction.java b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulatePipelineAction.java
index f85b89f774477..efd2777d6d7a6 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulatePipelineAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestSimulatePipelineAction.java
@@ -11,7 +11,7 @@
 
 import org.elasticsearch.action.ingest.SimulatePipelineRequest;
 import org.elasticsearch.client.internal.node.NodeClient;
-import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.ReleasableBytesReference;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestRequest;
@@ -46,8 +46,8 @@ public String getName() {
 
     @Override
     public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
-        Tuple<XContentType, BytesReference> sourceTuple = restRequest.contentOrSourceParam();
-        SimulatePipelineRequest request = new SimulatePipelineRequest(sourceTuple.v2(), sourceTuple.v1(), restRequest.getRestApiVersion());
+        Tuple<XContentType, ReleasableBytesReference> sourceTuple = restRequest.contentOrSourceParam();
+        final var request = new SimulatePipelineRequest(sourceTuple.v2(), sourceTuple.v1(), restRequest.getRestApiVersion());
         request.setId(restRequest.param("id"));
         request.setVerbose(restRequest.paramAsBoolean("verbose", false));
         return channel -> client.admin().cluster().simulatePipeline(request, new RestToXContentListener<>(channel));
diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java
index aeb182978e1eb..89775b4ca8e15 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java
@@ -17,7 +17,7 @@
 import org.elasticsearch.common.CheckedBiConsumer;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.TriFunction;
-import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.ReleasableBytesReference;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.features.NodeFeature;
@@ -184,9 +184,9 @@ public static void parseMultiLineRequest(
         boolean ccsMinimizeRoundtrips = request.paramAsBoolean("ccs_minimize_roundtrips", true);
         String routing = request.param("routing");
 
-        final Tuple<XContentType, BytesReference> sourceTuple = request.contentOrSourceParam();
+        final Tuple<XContentType, ReleasableBytesReference> sourceTuple = request.contentOrSourceParam();
         final XContent xContent = sourceTuple.v1().xContent();
-        final BytesReference data = sourceTuple.v2();
+        final ReleasableBytesReference data = sourceTuple.v2();
         MultiSearchRequest.readMultiLineFormat(
             xContent,
             request.contentParserConfig(),
diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
index 80a85d3b9b748..ff062084a3cbb 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java
@@ -423,8 +423,4 @@ protected Set<String> responseParams() {
         return RESPONSE_PARAMS;
     }
 
-    @Override
-    public boolean allowsUnsafeBuffers() {
-        return true;
-    }
 }
diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/SearchCapabilities.java b/server/src/main/java/org/elasticsearch/rest/action/search/SearchCapabilities.java
index 3bc1c467323a3..794b30aa5aab2 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/search/SearchCapabilities.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/search/SearchCapabilities.java
@@ -27,7 +27,7 @@ private SearchCapabilities() {}
     /** Support synthetic source with `bit` type in `dense_vector` field when `index` is set to `false`. */
     private static final String BIT_DENSE_VECTOR_SYNTHETIC_SOURCE_CAPABILITY = "bit_dense_vector_synthetic_source";
     /** Support Byte and Float with Bit dot product. */
-    private static final String BYTE_FLOAT_BIT_DOT_PRODUCT_CAPABILITY = "byte_float_bit_dot_product";
+    private static final String BYTE_FLOAT_BIT_DOT_PRODUCT_CAPABILITY = "byte_float_bit_dot_product_with_bugfix";
     /** Support docvalue_fields parameter for `dense_vector` field. */
     private static final String DENSE_VECTOR_DOCVALUE_FIELDS = "dense_vector_docvalue_fields";
     /** Support transforming rank rrf queries to the corresponding rrf retriever. */
@@ -38,6 +38,12 @@ private SearchCapabilities() {}
     private static final String MULTI_DENSE_VECTOR_FIELD_MAPPER = "multi_dense_vector_field_mapper";
     /** Support propagating nested retrievers' inner_hits to top-level compound retrievers. */
     private static final String NESTED_RETRIEVER_INNER_HITS_SUPPORT = "nested_retriever_inner_hits_support";
+    /** Support multi-dense-vector script field access. */
+    private static final String MULTI_DENSE_VECTOR_SCRIPT_ACCESS = "multi_dense_vector_script_access";
+    /** Initial support for multi-dense-vector maxSim functions access. */
+    private static final String MULTI_DENSE_VECTOR_SCRIPT_MAX_SIM = "multi_dense_vector_script_max_sim_with_bugfix";
+
+    private static final String RANDOM_SAMPLER_WITH_SCORED_SUBAGGS = "random_sampler_with_scored_subaggs";
 
     public static final Set<String> CAPABILITIES;
     static {
@@ -48,8 +54,11 @@ private SearchCapabilities() {}
         capabilities.add(DENSE_VECTOR_DOCVALUE_FIELDS);
         capabilities.add(TRANSFORM_RANK_RRF_TO_RETRIEVER);
         capabilities.add(NESTED_RETRIEVER_INNER_HITS_SUPPORT);
+        capabilities.add(RANDOM_SAMPLER_WITH_SCORED_SUBAGGS);
         if (MultiDenseVectorFieldMapper.FEATURE_FLAG.isEnabled()) {
             capabilities.add(MULTI_DENSE_VECTOR_FIELD_MAPPER);
+            capabilities.add(MULTI_DENSE_VECTOR_SCRIPT_ACCESS);
+            capabilities.add(MULTI_DENSE_VECTOR_SCRIPT_MAX_SIM);
         }
         if (Build.current().isSnapshot()) {
             capabilities.add(KQL_QUERY_SUPPORTED);
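Renaming a capability string (the _with_bugfix suffixes above) is how a fixed implementation is distinguished from the buggy one: clients probing for the old string simply stop matching and fall back. A client-side sketch of gating on the advertised set (the set contents below are illustrative only):

    import java.util.Set;

    public class CapabilityCheck {
        public static void main(String[] args) {
            // Pretend this came back from a capabilities probe of the cluster.
            Set<String> advertised = Set.of("dense_vector_docvalue_fields", "byte_float_bit_dot_product_with_bugfix");
            // Gate on the renamed capability: only the fixed implementation advertises it.
            boolean canUseBitDotProduct = advertised.contains("byte_float_bit_dot_product_with_bugfix");
            System.out.println(canUseBitDotProduct ? "use bit dot product" : "fall back");
        }
    }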
diff --git a/server/src/main/java/org/elasticsearch/script/MultiVectorScoreScriptUtils.java b/server/src/main/java/org/elasticsearch/script/MultiVectorScoreScriptUtils.java
new file mode 100644
index 0000000000000..136c5e7b57d4b
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/script/MultiVectorScoreScriptUtils.java
@@ -0,0 +1,372 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.script;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper;
+import org.elasticsearch.script.field.vectors.DenseVector;
+import org.elasticsearch.script.field.vectors.MultiDenseVectorDocValuesField;
+
+import java.io.IOException;
+import java.util.HexFormat;
+import java.util.List;
+
+public class MultiVectorScoreScriptUtils {
+
+    public static class MultiDenseVectorFunction {
+        protected final ScoreScript scoreScript;
+        protected final MultiDenseVectorDocValuesField field;
+
+        public MultiDenseVectorFunction(ScoreScript scoreScript, MultiDenseVectorDocValuesField field) {
+            this.scoreScript = scoreScript;
+            this.field = field;
+        }
+
+        void setNextVector() {
+            try {
+                field.setNextDocId(scoreScript._getDocId());
+            } catch (IOException e) {
+                throw ExceptionsHelper.convertToElastic(e);
+            }
+            if (field.isEmpty()) {
+                throw new IllegalArgumentException("A document doesn't have a value for a multi-vector field!");
+            }
+        }
+    }
+
+    public static class ByteMultiDenseVectorFunction extends MultiDenseVectorFunction {
+        protected final byte[][] queryVector;
+
+        /**
+         * Constructs a multi-dense vector function used for byte-sized vectors.
+         *
+         * @param scoreScript The script in which this function was referenced.
+         * @param field The vector field.
+         * @param queryVector The query vector.
+         */
+        public ByteMultiDenseVectorFunction(ScoreScript scoreScript, MultiDenseVectorDocValuesField field, List<List<Number>> queryVector) {
+            super(scoreScript, field);
+            if (queryVector.isEmpty()) {
+                throw new IllegalArgumentException("The query vector is empty.");
+            }
+            field.getElementType().checkDimensions(field.get().getDims(), queryVector.get(0).size());
+            this.queryVector = new byte[queryVector.size()][queryVector.get(0).size()];
+            float[] validateValues = new float[queryVector.get(0).size()];
+            int lastSize = -1;
+            for (int i = 0; i < queryVector.size(); i++) {
+                if (lastSize != -1 && lastSize != queryVector.get(i).size()) {
+                    throw new IllegalArgumentException(
+                        "The query vector contains inner vectors which have inconsistent number of dimensions."
+                    );
+                }
+                lastSize = queryVector.get(i).size();
+                for (int j = 0; j < queryVector.get(i).size(); j++) {
+                    final Number number = queryVector.get(i).get(j);
+                    byte value = number.byteValue();
+                    this.queryVector[i][j] = value;
+                    validateValues[j] = number.floatValue();
+                }
+                field.getElementType().checkVectorBounds(validateValues);
+            }
+        }
+
+        /**
+         * Constructs a multi-dense vector function used for byte-sized vectors.
+         *
+         * @param scoreScript The script in which this function was referenced.
+         * @param field The vector field.
+         * @param queryVector The query vector.
+         */
+        public ByteMultiDenseVectorFunction(ScoreScript scoreScript, MultiDenseVectorDocValuesField field, byte[][] queryVector) {
+            super(scoreScript, field);
+            this.queryVector = queryVector;
+        }
+    }
+
+    public static class FloatMultiDenseVectorFunction extends MultiDenseVectorFunction {
+        protected final float[][] queryVector;
+
+        /**
+         * Constructs a multi-dense vector function used for float vectors.
+         *
+         * @param scoreScript The script in which this function was referenced.
+         * @param field The vector field.
+         * @param queryVector The query vector.
+         */
+        public FloatMultiDenseVectorFunction(
+            ScoreScript scoreScript,
+            MultiDenseVectorDocValuesField field,
+            List<List<Number>> queryVector
+        ) {
+            super(scoreScript, field);
+            if (queryVector.isEmpty()) {
+                throw new IllegalArgumentException("The query vector is empty.");
+            }
+            DenseVector.checkDimensions(field.get().getDims(), queryVector.get(0).size());
+
+            this.queryVector = new float[queryVector.size()][queryVector.get(0).size()];
+            int lastSize = -1;
+            for (int i = 0; i < queryVector.size(); i++) {
+                if (lastSize != -1 && lastSize != queryVector.get(i).size()) {
+                    throw new IllegalArgumentException(
+                        "The query vector contains inner vectors which have inconsistent number of dimensions."
+                    );
+                }
+                lastSize = queryVector.get(i).size();
+                for (int j = 0; j < queryVector.get(i).size(); j++) {
+                    this.queryVector[i][j] = queryVector.get(i).get(j).floatValue();
+                }
+                field.getElementType().checkVectorBounds(this.queryVector[i]);
+            }
+        }
+    }
+
+    // Calculate the maximum inverse Hamming similarity between a query's multi-vector and documents' multi-vectors
+    public interface MaxSimInvHammingDistanceInterface {
+        float maxSimInvHamming();
+    }
+
+    public static class ByteMaxSimInvHammingDistance extends ByteMultiDenseVectorFunction implements MaxSimInvHammingDistanceInterface {
+
+        public ByteMaxSimInvHammingDistance(ScoreScript scoreScript, MultiDenseVectorDocValuesField field, List<List<Number>> queryVector) {
+            super(scoreScript, field, queryVector);
+        }
+
+        public ByteMaxSimInvHammingDistance(ScoreScript scoreScript, MultiDenseVectorDocValuesField field, byte[][] queryVector) {
+            super(scoreScript, field, queryVector);
+        }
+
+        public float maxSimInvHamming() {
+            setNextVector();
+            return field.get().maxSimInvHamming(queryVector);
+        }
+    }
+
+    private record BytesOrList(byte[][] bytes, List<List<Number>> list) {}
+
+    @SuppressWarnings("unchecked")
+    private static BytesOrList parseBytes(Object queryVector) {
+        if (queryVector instanceof List) {
+            // check if it's a list of strings or a list of lists
+            if (((List<?>) queryVector).get(0) instanceof List) {
+                return new BytesOrList(null, ((List<List<Number>>) queryVector));
+            } else if (((List<?>) queryVector).get(0) instanceof String) {
+                byte[][] parsedQueryVector = new byte[((List<?>) queryVector).size()][];
+                int lastSize = -1;
+                for (int i = 0; i < ((List<?>) queryVector).size(); i++) {
+                    parsedQueryVector[i] = HexFormat.of().parseHex((String) ((List<?>) queryVector).get(i));
+                    if (lastSize != -1 && lastSize != parsedQueryVector[i].length) {
+                        throw new IllegalArgumentException(
+                            "The query vector contains inner vectors which have inconsistent number of dimensions."
+                        );
+                    }
+                    lastSize = parsedQueryVector[i].length;
+                }
+                return new BytesOrList(parsedQueryVector, null);
+            } else {
+                throw new IllegalArgumentException("Unsupported input object for byte vectors: " + queryVector.getClass().getName());
+            }
+        } else {
+            throw new IllegalArgumentException("Unsupported input object for byte vectors: " + queryVector.getClass().getName());
+        }
+    }
+
+    public static final class MaxSimInvHamming {
+
+        private final MaxSimInvHammingDistanceInterface function;
+
+        public MaxSimInvHamming(ScoreScript scoreScript, Object queryVector, String fieldName) {
+            MultiDenseVectorDocValuesField field = (MultiDenseVectorDocValuesField) scoreScript.field(fieldName);
+            if (field.getElementType() == DenseVectorFieldMapper.ElementType.FLOAT) {
+                throw new IllegalArgumentException("hamming distance is only supported for byte or bit vectors");
+            }
+            BytesOrList bytesOrList = parseBytes(queryVector);
+            if (bytesOrList.bytes != null) {
+                this.function = new ByteMaxSimInvHammingDistance(scoreScript, field, bytesOrList.bytes);
+            } else {
+                this.function = new ByteMaxSimInvHammingDistance(scoreScript, field, bytesOrList.list);
+            }
+        }
+
+        public double maxSimInvHamming() {
+            return function.maxSimInvHamming();
+        }
+    }
+
+    // Calculate the maxSim dot product between a query's multi-vector and documents' multi-vectors
+    public interface MaxSimDotProductInterface {
+        double maxSimDotProduct();
+    }
+
+    public static class MaxSimBitDotProduct extends MultiDenseVectorFunction implements MaxSimDotProductInterface {
+        private final byte[][] byteQueryVector;
+        private final float[][] floatQueryVector;
+
+        public MaxSimBitDotProduct(ScoreScript scoreScript, MultiDenseVectorDocValuesField field, byte[][] queryVector) {
+            super(scoreScript, field);
+            if (field.getElementType() != DenseVectorFieldMapper.ElementType.BIT) {
+                throw new IllegalArgumentException("Cannot calculate bit dot product for non-bit vectors");
+            }
+            int fieldDims = field.get().getDims();
+            if (fieldDims != queryVector.length * Byte.SIZE && fieldDims != queryVector.length) {
+                throw new IllegalArgumentException(
+                    "The query vector has an incorrect number of dimensions. Must be ["
+                        + fieldDims / 8
+                        + "] for bitwise operations, or ["
+                        + fieldDims
+                        + "] for byte wise operations: provided ["
+                        + queryVector.length
+                        + "]."
+                );
+            }
+            this.byteQueryVector = queryVector;
+            this.floatQueryVector = null;
+        }
+
+        public MaxSimBitDotProduct(ScoreScript scoreScript, MultiDenseVectorDocValuesField field, List<List<Number>> queryVector) {
+            super(scoreScript, field);
+            if (queryVector.isEmpty()) {
+                throw new IllegalArgumentException("The query vector is empty.");
+            }
+            if (field.getElementType() != DenseVectorFieldMapper.ElementType.BIT) {
+                throw new IllegalArgumentException("cannot calculate bit dot product for non-bit vectors");
+            }
+            float[][] floatQueryVector = new float[queryVector.size()][];
+            byte[][] byteQueryVector = new byte[queryVector.size()][];
+            boolean isFloat = false;
+            int lastSize = -1;
+            for (int i = 0; i < queryVector.size(); i++) {
+                if (lastSize != -1 && lastSize != queryVector.get(i).size()) {
+                    throw new IllegalArgumentException(
+                        "The query vector contains inner vectors which have inconsistent number of dimensions."
+                    );
+                }
+                lastSize = queryVector.get(i).size();
+                floatQueryVector[i] = new float[queryVector.get(i).size()];
+                if (isFloat == false) {
+                    byteQueryVector[i] = new byte[queryVector.get(i).size()];
+                }
+                for (int j = 0; j < queryVector.get(i).size(); j++) {
+                    Number number = queryVector.get(i).get(j);
+                    floatQueryVector[i][j] = number.floatValue();
+                    if (isFloat == false) {
+                        byteQueryVector[i][j] = number.byteValue();
+                    }
+                    if (isFloat
+                        || floatQueryVector[i][j] % 1.0f != 0.0f
+                        || floatQueryVector[i][j] < Byte.MIN_VALUE
+                        || floatQueryVector[i][j] > Byte.MAX_VALUE) {
+                        isFloat = true;
+                    }
+                }
+            }
+            int fieldDims = field.get().getDims();
+            if (isFloat) {
+                this.floatQueryVector = floatQueryVector;
+                this.byteQueryVector = null;
+                if (fieldDims != floatQueryVector[0].length) {
+                    throw new IllegalArgumentException(
+                        "The query vector contains inner vectors which have incorrect number of dimensions. Must be ["
+                            + fieldDims
+                            + "] for float wise operations: provided ["
+                            + floatQueryVector[0].length
+                            + "]."
+                    );
+                }
+            } else {
+                this.floatQueryVector = null;
+                this.byteQueryVector = byteQueryVector;
+                if (fieldDims != byteQueryVector[0].length * Byte.SIZE && fieldDims != byteQueryVector[0].length) {
+                    throw new IllegalArgumentException(
+                        "The query vector contains inner vectors which have incorrect number of dimensions. Must be ["
+                            + fieldDims / 8
+                            + "] for bitwise operations, or ["
+                            + fieldDims
+                            + "] for byte wise operations: provided ["
+                            + byteQueryVector[0].length
+                            + "]."
+                    );
+                }
+            }
+        }
+
+        @Override
+        public double maxSimDotProduct() {
+            setNextVector();
+            return byteQueryVector != null ? field.get().maxSimDotProduct(byteQueryVector) : field.get().maxSimDotProduct(floatQueryVector);
+        }
+    }
+
+    public static class MaxSimByteDotProduct extends ByteMultiDenseVectorFunction implements MaxSimDotProductInterface {
+
+        public MaxSimByteDotProduct(ScoreScript scoreScript, MultiDenseVectorDocValuesField field, List<List<Number>> queryVector) {
+            super(scoreScript, field, queryVector);
+        }
+
+        public MaxSimByteDotProduct(ScoreScript scoreScript, MultiDenseVectorDocValuesField field, byte[][] queryVector) {
+            super(scoreScript, field, queryVector);
+        }
+
+        public double maxSimDotProduct() {
+            setNextVector();
+            return field.get().maxSimDotProduct(queryVector);
+        }
+    }
+
+    public static class MaxSimFloatDotProduct extends FloatMultiDenseVectorFunction implements MaxSimDotProductInterface {
+
+        public MaxSimFloatDotProduct(ScoreScript scoreScript, MultiDenseVectorDocValuesField field, List<List<Number>> queryVector) {
+            super(scoreScript, field, queryVector);
+        }
+
+        public double maxSimDotProduct() {
+            setNextVector();
+            return field.get().maxSimDotProduct(queryVector);
+        }
+    }
+
+    public static final class MaxSimDotProduct {
+
+        private final MaxSimDotProductInterface function;
+
+        @SuppressWarnings("unchecked")
+        public MaxSimDotProduct(ScoreScript scoreScript, Object queryVector, String fieldName) {
+            MultiDenseVectorDocValuesField field = (MultiDenseVectorDocValuesField) scoreScript.field(fieldName);
+            function = switch (field.getElementType()) {
+                case BIT -> {
+                    BytesOrList bytesOrList = parseBytes(queryVector);
+                    if (bytesOrList.bytes != null) {
+                        yield new MaxSimBitDotProduct(scoreScript, field, bytesOrList.bytes);
+                    } else {
+                        yield new MaxSimBitDotProduct(scoreScript, field, bytesOrList.list);
+                    }
+                }
+                case BYTE -> {
+                    BytesOrList bytesOrList = parseBytes(queryVector);
+                    if (bytesOrList.bytes != null) {
+                        yield new MaxSimByteDotProduct(scoreScript, field, bytesOrList.bytes);
+                    } else {
+                        yield new MaxSimByteDotProduct(scoreScript, field, bytesOrList.list);
+                    }
+                }
+                case FLOAT -> {
+                    if (queryVector instanceof List) {
+                        yield new MaxSimFloatDotProduct(scoreScript, field, (List<List<Number>>) queryVector);
+                    }
+                    throw new IllegalArgumentException("Unsupported input object for float vectors: " + queryVector.getClass().getName());
+                }
+            };
+        }
+
+        public double maxSimDotProduct() {
+            return function.maxSimDotProduct();
+        }
+    }
+}
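Every maxSim function in this class reduces a (query multi-vector, document multi-vector) pair the same way: score each query vector against all of the document's vectors, keep the per-query-vector maximum, and sum those maxima. A self-contained sketch of that reduction, with a plain dot product standing in for the pluggable similarity:

    public class MaxSimSketch {
        static float dot(float[] a, float[] b) {
            float s = 0;
            for (int i = 0; i < a.length; i++) s += a[i] * b[i];
            return s;
        }

        // sum over query vectors of (max over document vectors of similarity)
        static float maxSimDotProduct(float[][] query, float[][] doc) {
            float sum = 0;
            for (float[] q : query) {
                float best = Float.NEGATIVE_INFINITY;
                for (float[] d : doc) best = Math.max(best, dot(q, d));
                sum += best;
            }
            return sum;
        }

        public static void main(String[] args) {
            float[][] query = { { 1, 0 }, { 0, 1 } };
            float[][] doc = { { 2, 0 }, { 0, 3 } };
            System.out.println(maxSimDotProduct(query, doc)); // 2 + 3 = 5.0
        }
    }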
diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/BitMultiDenseVector.java b/server/src/main/java/org/elasticsearch/script/field/vectors/BitMultiDenseVector.java
new file mode 100644
index 0000000000000..7805816090d51
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/script/field/vectors/BitMultiDenseVector.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.script.field.vectors;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.VectorUtil;
+import org.elasticsearch.simdvec.ESVectorUtil;
+
+import java.util.Arrays;
+
+public class BitMultiDenseVector extends ByteMultiDenseVector {
+    public BitMultiDenseVector(VectorIterator<byte[]> vectorValues, BytesRef magnitudesBytes, int numVecs, int dims) {
+        super(vectorValues, magnitudesBytes, numVecs, dims);
+    }
+
+    @Override
+    public void checkDimensions(int qvDims) {
+        if (qvDims != dims) {
+            throw new IllegalArgumentException(
+                "The query vector has a different number of dimensions ["
+                    + qvDims * Byte.SIZE
+                    + "] than the document vectors ["
+                    + dims * Byte.SIZE
+                    + "]."
+            );
+        }
+    }
+
+    @Override
+    public float maxSimDotProduct(float[][] query) {
+        vectorValues.reset();
+        float[] maxes = new float[query.length];
+        Arrays.fill(maxes, Float.NEGATIVE_INFINITY);
+        while (vectorValues.hasNext()) {
+            byte[] vv = vectorValues.next();
+            for (int i = 0; i < query.length; i++) {
+                maxes[i] = Math.max(maxes[i], ESVectorUtil.ipFloatBit(query[i], vv));
+            }
+        }
+        float sums = 0;
+        for (float m : maxes) {
+            sums += m;
+        }
+        return sums;
+    }
+
+    @Override
+    public float maxSimDotProduct(byte[][] query) {
+        vectorValues.reset();
+        float[] maxes = new float[query.length];
+        Arrays.fill(maxes, Float.NEGATIVE_INFINITY);
+        if (query[0].length == dims) {
+            while (vectorValues.hasNext()) {
+                byte[] vv = vectorValues.next();
+                for (int i = 0; i < query.length; i++) {
+                    maxes[i] = Math.max(maxes[i], ESVectorUtil.andBitCount(query[i], vv));
+                }
+            }
+        } else {
+            while (vectorValues.hasNext()) {
+                byte[] vv = vectorValues.next();
+                for (int i = 0; i < query.length; i++) {
+                    maxes[i] = Math.max(maxes[i], ESVectorUtil.ipByteBit(query[i], vv));
+                }
+            }
+        }
+        float sum = 0;
+        for (float m : maxes) {
+            sum += m;
+        }
+        return sum;
+    }
+
+    @Override
+    public float maxSimInvHamming(byte[][] query) {
+        vectorValues.reset();
+        int bitCount = this.getDims();
+        float[] maxes = new float[query.length];
+        Arrays.fill(maxes, Float.NEGATIVE_INFINITY);
+        while (vectorValues.hasNext()) {
+            byte[] vv = vectorValues.next();
+            for (int i = 0; i < query.length; i++) {
+                maxes[i] = Math.max(maxes[i], ((bitCount - VectorUtil.xorBitCount(vv, query[i])) / (float) bitCount));
+            }
+        }
+        float sum = 0;
+        for (float m : maxes) {
+            sum += m;
+        }
+        return sum;
+    }
+
+    @Override
+    public int getDims() {
+        return dims * Byte.SIZE;
+    }
+}
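The inverse-Hamming score above is (totalBits - xorBitCount) / totalBits, i.e. the fraction of matching bits, so identical bit vectors score 1.0. A worked standalone version with Integer.bitCount in place of Lucene's VectorUtil.xorBitCount:

    public class InvHammingSketch {
        // fraction of equal bits between two packed bit vectors
        static float invHamming(byte[] a, byte[] b) {
            int bitCount = a.length * Byte.SIZE;
            int xor = 0;
            for (int i = 0; i < a.length; i++) {
                xor += Integer.bitCount((a[i] ^ b[i]) & 0xFF);
            }
            return (bitCount - xor) / (float) bitCount;
        }

        public static void main(String[] args) {
            byte[] v = { (byte) 0b10101010 };
            byte[] w = { (byte) 0b10101011 };
            System.out.println(invHamming(v, w)); // 7 of 8 bits match -> 0.875
        }
    }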
diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/BitMultiDenseVectorDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/vectors/BitMultiDenseVectorDocValuesField.java
new file mode 100644
index 0000000000000..35a43eabb8f0c
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/script/field/vectors/BitMultiDenseVectorDocValuesField.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.script.field.vectors;
+
+import org.apache.lucene.index.BinaryDocValues;
+import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.ElementType;
+
+public class BitMultiDenseVectorDocValuesField extends ByteMultiDenseVectorDocValuesField {
+
+    public BitMultiDenseVectorDocValuesField(
+        BinaryDocValues input,
+        BinaryDocValues magnitudes,
+        String name,
+        ElementType elementType,
+        int dims
+    ) {
+        super(input, magnitudes, name, elementType, dims / 8);
+    }
+
+    @Override
+    protected MultiDenseVector getVector() {
+        return new BitMultiDenseVector(vectorValue, magnitudesValue, numVecs, dims);
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteMultiDenseVector.java b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteMultiDenseVector.java
new file mode 100644
index 0000000000000..5e9d3e05746c8
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteMultiDenseVector.java
@@ -0,0 +1,131 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.script.field.vectors;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.VectorUtil;
+import org.elasticsearch.index.mapper.vectors.VectorEncoderDecoder;
+
+import java.util.Arrays;
+import java.util.Iterator;
+
+public class ByteMultiDenseVector implements MultiDenseVector {
+
+    protected final VectorIterator<byte[]> vectorValues;
+    protected final int numVecs;
+    protected final int dims;
+
+    private float[] magnitudes;
+    private final BytesRef magnitudesBytes;
+
+    public ByteMultiDenseVector(VectorIterator<byte[]> vectorValues, BytesRef magnitudesBytes, int numVecs, int dims) {
+        assert magnitudesBytes.length == numVecs * Float.BYTES;
+        this.vectorValues = vectorValues;
+        this.numVecs = numVecs;
+        this.dims = dims;
+        this.magnitudesBytes = magnitudesBytes;
+    }
+
+    @Override
+    public float maxSimDotProduct(float[][] query) {
+        throw new UnsupportedOperationException("use [float maxSimDotProduct(byte[][] queryVector)] instead");
+    }
+
+    @Override
+    public float maxSimDotProduct(byte[][] query) {
+        vectorValues.reset();
+        float[] maxes = new float[query.length];
+        Arrays.fill(maxes, Float.NEGATIVE_INFINITY);
+        while (vectorValues.hasNext()) {
+            byte[] vv = vectorValues.next();
+            for (int i = 0; i < query.length; i++) {
+                maxes[i] = Math.max(maxes[i], VectorUtil.dotProduct(query[i], vv));
+            }
+        }
+        float sum = 0;
+        for (float m : maxes) {
+            sum += m;
+        }
+        return sum;
+    }
+
+    @Override
+    public float maxSimInvHamming(byte[][] query) {
+        vectorValues.reset();
+        int bitCount = dims * Byte.SIZE;
+        float[] maxes = new float[query.length];
+        Arrays.fill(maxes, Float.NEGATIVE_INFINITY);
+        while (vectorValues.hasNext()) {
+            byte[] vv = vectorValues.next();
+            for (int i = 0; i < query.length; i++) {
+                maxes[i] = Math.max(maxes[i], ((bitCount - VectorUtil.xorBitCount(vv, query[i])) / (float) bitCount));
+            }
+        }
+        float sum = 0;
+        for (float m : maxes) {
+            sum += m;
+        }
+        return sum;
+    }
+
+    @Override
+    public Iterator<float[]> getVectors() {
+        return new ByteToFloatIteratorWrapper(vectorValues.copy(), dims);
+    }
+
+    @Override
+    public float[] getMagnitudes() {
+        if (magnitudes == null) {
+            magnitudes = VectorEncoderDecoder.getMultiMagnitudes(magnitudesBytes);
+        }
+        return magnitudes;
+    }
+
+    @Override
+    public boolean isEmpty() {
+        return false;
+    }
+
+    @Override
+    public int getDims() {
+        return dims;
+    }
+
+    @Override
+    public int size() {
+        return numVecs;
+    }
+
+    static class ByteToFloatIteratorWrapper implements Iterator<float[]> {
+        private final Iterator<byte[]> byteIterator;
+        private final float[] buffer;
+        private final int dims;
+
+        ByteToFloatIteratorWrapper(Iterator<byte[]> byteIterator, int dims) {
+            this.byteIterator = byteIterator;
+            this.buffer = new float[dims];
+            this.dims = dims;
+        }
+
+        @Override
+        public boolean hasNext() {
+            return byteIterator.hasNext();
+        }
+
+        @Override
+        public float[] next() {
+            byte[] next = byteIterator.next();
+            for (int i = 0; i < dims; i++) {
+                buffer[i] = next[i];
+            }
+            return buffer;
+        }
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteMultiDenseVectorDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteMultiDenseVectorDocValuesField.java
new file mode 100644
index 0000000000000..d45c5b85137f5
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteMultiDenseVectorDocValuesField.java
@@ -0,0 +1,152 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.script.field.vectors;
+
+import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.ElementType;
+import org.elasticsearch.index.mapper.vectors.MultiDenseVectorScriptDocValues;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+public class ByteMultiDenseVectorDocValuesField extends MultiDenseVectorDocValuesField {
+
+    protected final BinaryDocValues input;
+    private final BinaryDocValues magnitudes;
+    protected final int dims;
+    protected int numVecs;
+    protected VectorIterator<byte[]> vectorValue;
+    protected boolean decoded;
+    protected BytesRef value;
+    protected BytesRef magnitudesValue;
+    private byte[] buffer;
+
+    public ByteMultiDenseVectorDocValuesField(
+        BinaryDocValues input,
+        BinaryDocValues magnitudes,
+        String name,
+        ElementType elementType,
+        int dims
+    ) {
+        super(name, elementType);
+        this.input = input;
+        this.dims = dims;
+        this.buffer = new byte[dims];
+        this.magnitudes = magnitudes;
+    }
+
+    @Override
+    public void setNextDocId(int docId) throws IOException {
+        decoded = false;
+        if (input.advanceExact(docId)) {
+            boolean magnitudesFound = magnitudes.advanceExact(docId);
+            assert magnitudesFound;
+            value = input.binaryValue();
+            assert value.length % dims == 0;
+            numVecs = value.length / dims;
+            magnitudesValue = magnitudes.binaryValue();
+            assert magnitudesValue.length == (numVecs * Float.BYTES);
+        } else {
+            value = null;
+            magnitudesValue = null;
+            vectorValue = null;
+            numVecs = 0;
+        }
+    }
+
+    @Override
+    public MultiDenseVectorScriptDocValues toScriptDocValues() {
+        return new MultiDenseVectorScriptDocValues(this, dims);
+    }
+
+    protected MultiDenseVector getVector() {
+        return new ByteMultiDenseVector(vectorValue, magnitudesValue, numVecs, dims);
+    }
+
+    @Override
+    public MultiDenseVector get() {
+        if (isEmpty()) {
+            return MultiDenseVector.EMPTY;
+        }
+        decodeVectorIfNecessary();
+        return getVector();
+    }
+
+    @Override
+    public MultiDenseVector get(MultiDenseVector defaultValue) {
+        if (isEmpty()) {
+            return defaultValue;
+        }
+        decodeVectorIfNecessary();
+        return getVector();
+    }
+
+    @Override
+    public MultiDenseVector getInternal() {
+        return get(null);
+    }
+
+    private void decodeVectorIfNecessary() {
+        if (decoded == false && value != null) {
+            vectorValue = new ByteVectorIterator(value, buffer, numVecs);
+            decoded = true;
+        }
+    }
+
+    @Override
+    public int size() {
+        return value == null ? 0 : value.length / dims;
+    }
+
+    @Override
+    public boolean isEmpty() {
+        return value == null;
+    }
+
+    static class ByteVectorIterator implements VectorIterator<byte[]> {
+        private final byte[] buffer;
+        private final BytesRef vectorValues;
+        private final int size;
+        private int idx = 0;
+
+        ByteVectorIterator(BytesRef vectorValues, byte[] buffer, int size) {
+            assert vectorValues.length == (buffer.length * size);
+            this.vectorValues = vectorValues;
+            this.size = size;
+            this.buffer = buffer;
+        }
+
+        @Override
+        public boolean hasNext() {
+            return idx < size;
+        }
+
+        @Override
+        public byte[] next() {
+            if (hasNext() == false) {
+                throw new IllegalArgumentException("No more elements in the iterator");
+            }
+            System.arraycopy(vectorValues.bytes, vectorValues.offset + idx * buffer.length, buffer, 0, buffer.length);
+            idx++;
+            return buffer;
+        }
+
+        @Override
+        public Iterator<byte[]> copy() {
+            return new ByteVectorIterator(vectorValues, new byte[buffer.length], size);
+        }
+
+        @Override
+        public void reset() {
+            idx = 0;
+        }
+    }
+}
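Note that ByteVectorIterator (and the float equivalent below) returns the same scratch buffer from every next() call; that keeps per-document allocation flat, but a caller that wants values to outlive the current step must copy them, and copy() exists to obtain an independent cursor over the same bytes. A caller-side sketch of the safe pattern:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    public class BufferReuseSketch {
        public static void main(String[] args) {
            byte[] shared = new byte[2]; // scratch buffer, rewritten on each next()
            Iterator<byte[]> vectors = new Iterator<>() {
                int i = 0;
                public boolean hasNext() { return i < 3; }
                public byte[] next() { shared[0] = (byte) i; shared[1] = (byte) i++; return shared; }
            };
            List<byte[]> kept = new ArrayList<>();
            while (vectors.hasNext()) {
                kept.add(vectors.next().clone()); // clone, or every element aliases the scratch buffer
            }
            kept.forEach(v -> System.out.println(Arrays.toString(v)));
        }
    }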
diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/FloatMultiDenseVector.java b/server/src/main/java/org/elasticsearch/script/field/vectors/FloatMultiDenseVector.java
new file mode 100644
index 0000000000000..9c2f7eb6a86d4
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/script/field/vectors/FloatMultiDenseVector.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.script.field.vectors;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.VectorUtil;
+
+import java.util.Arrays;
+import java.util.Iterator;
+
+import static org.elasticsearch.index.mapper.vectors.VectorEncoderDecoder.getMultiMagnitudes;
+
+public class FloatMultiDenseVector implements MultiDenseVector {
+
+    private final BytesRef magnitudes;
+    private float[] magnitudesArray = null;
+    private final int dims;
+    private final int numVectors;
+    private final VectorIterator<float[]> vectorValues;
+
+    public FloatMultiDenseVector(VectorIterator<float[]> decodedDocVector, BytesRef magnitudes, int numVectors, int dims) {
+        assert magnitudes.length == numVectors * Float.BYTES;
+        this.vectorValues = decodedDocVector;
+        this.magnitudes = magnitudes;
+        this.numVectors = numVectors;
+        this.dims = dims;
+    }
+
+    @Override
+    public float maxSimDotProduct(float[][] query) {
+        vectorValues.reset();
+        float[] maxes = new float[query.length];
+        Arrays.fill(maxes, Float.NEGATIVE_INFINITY);
+        while (vectorValues.hasNext()) {
+            float[] vv = vectorValues.next();
+            for (int i = 0; i < query.length; i++) {
+                maxes[i] = Math.max(maxes[i], VectorUtil.dotProduct(query[i], vv));
+            }
+        }
+        float sum = 0;
+        for (float m : maxes) {
+            sum += m;
+        }
+        return sum;
+    }
+
+    @Override
+    public float maxSimDotProduct(byte[][] query) {
+        throw new UnsupportedOperationException("use [float maxSimDotProduct(float[][] queryVector)] instead");
+    }
+
+    @Override
+    public float maxSimInvHamming(byte[][] query) {
+        throw new UnsupportedOperationException("hamming distance is not supported for float vectors");
+    }
+
+    @Override
+    public Iterator<float[]> getVectors() {
+        return vectorValues.copy();
+    }
+
+    @Override
+    public float[] getMagnitudes() {
+        if (magnitudesArray == null) {
+            magnitudesArray = getMultiMagnitudes(magnitudes);
+        }
+        return magnitudesArray;
+    }
+
+    @Override
+    public boolean isEmpty() {
+        return false;
+    }
+
+    @Override
+    public int getDims() {
+        return dims;
+    }
+
+    @Override
+    public int size() {
+        return numVectors;
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/FloatMultiDenseVectorDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/vectors/FloatMultiDenseVectorDocValuesField.java
new file mode 100644
index 0000000000000..c7ac7842afd96
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/script/field/vectors/FloatMultiDenseVectorDocValuesField.java
@@ -0,0 +1,156 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.script.field.vectors;
+
+import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.ElementType;
+import org.elasticsearch.index.mapper.vectors.MultiDenseVectorScriptDocValues;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.FloatBuffer;
+import java.util.Iterator;
+
+public class FloatMultiDenseVectorDocValuesField extends MultiDenseVectorDocValuesField {
+
+    private final BinaryDocValues input;
+    private final BinaryDocValues magnitudes;
+    private boolean decoded;
+    private final int dims;
+    private BytesRef value;
+    private BytesRef magnitudesValue;
+    private FloatVectorIterator vectorValues;
+    private int numVectors;
+    private float[] buffer;
+
+    public FloatMultiDenseVectorDocValuesField(
+        BinaryDocValues input,
+        BinaryDocValues magnitudes,
+        String name,
+        ElementType elementType,
+        int dims
+    ) {
+        super(name, elementType);
+        this.input = input;
+        this.magnitudes = magnitudes;
+        this.dims = dims;
+        this.buffer = new float[dims];
+    }
+
+    @Override
+    public void setNextDocId(int docId) throws IOException {
+        decoded = false;
+        if (input.advanceExact(docId)) {
+            boolean magnitudesFound = magnitudes.advanceExact(docId);
+            assert magnitudesFound;
+
+            value = input.binaryValue();
+            assert value.length % (Float.BYTES * dims) == 0;
+            numVectors = value.length / (Float.BYTES * dims);
+            magnitudesValue = magnitudes.binaryValue();
+            assert magnitudesValue.length == (Float.BYTES * numVectors);
+        } else {
+            value = null;
+            magnitudesValue = null;
+            numVectors = 0;
+        }
+    }
+
+    @Override
+    public MultiDenseVectorScriptDocValues toScriptDocValues() {
+        return new MultiDenseVectorScriptDocValues(this, dims);
+    }
+
+    @Override
+    public boolean isEmpty() {
+        return value == null;
+    }
+
+    @Override
+    public MultiDenseVector get() {
+        if (isEmpty()) {
+            return MultiDenseVector.EMPTY;
+        }
+        decodeVectorIfNecessary();
+        return new FloatMultiDenseVector(vectorValues, magnitudesValue, numVectors, dims);
+    }
+
+    @Override
+    public MultiDenseVector get(MultiDenseVector defaultValue) {
+        if (isEmpty()) {
+            return defaultValue;
+        }
+        decodeVectorIfNecessary();
+        return new FloatMultiDenseVector(vectorValues, magnitudesValue, numVectors, dims);
+    }
+
+    @Override
+    public MultiDenseVector getInternal() {
+        return get(null);
+    }
+
+    @Override
+    public int size() {
+        return value == null ? 0 : value.length / (Float.BYTES * dims);
+    }
+
+    private void decodeVectorIfNecessary() {
+        if (decoded == false && value != null) {
+            vectorValues = new FloatVectorIterator(value, buffer, numVectors);
+            decoded = true;
+        }
+    }
+
+    static class FloatVectorIterator implements VectorIterator<float[]> {
+        private final float[] buffer;
+        private final FloatBuffer vectorValues;
+        private final BytesRef vectorValueBytesRef;
+        private final int size;
+        private int idx = 0;
+
+        FloatVectorIterator(BytesRef vectorValues, float[] buffer, int size) {
+            assert vectorValues.length == (buffer.length * Float.BYTES * size);
+            this.vectorValueBytesRef = vectorValues;
+            this.vectorValues = ByteBuffer.wrap(vectorValues.bytes, vectorValues.offset, vectorValues.length)
+                .order(ByteOrder.LITTLE_ENDIAN)
+                .asFloatBuffer();
+            this.size = size;
+            this.buffer = buffer;
+        }
+
+        @Override
+        public boolean hasNext() {
+            return idx < size;
+        }
+
+        @Override
+        public float[] next() {
+            if (hasNext() == false) {
+                throw new IllegalArgumentException("No more elements in the iterator");
+            }
+            vectorValues.get(buffer);
+            idx++;
+            return buffer;
+        }
+
+        @Override
+        public Iterator<float[]> copy() {
+            return new FloatVectorIterator(vectorValueBytesRef, new float[buffer.length], size);
+        }
+
+        @Override
+        public void reset() {
+            idx = 0;
+            vectorValues.rewind();
+        }
+    }
+}
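FloatMultiDenseVectorDocValuesField decodes lazily: the raw BytesRef is numVectors contiguous vectors of dims little-endian floats, read through an asFloatBuffer() window into one reusable dims-sized buffer. A minimal round trip under those layout assumptions:

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;
    import java.nio.FloatBuffer;
    import java.util.Arrays;

    public class FloatDecodeSketch {
        public static void main(String[] args) {
            int numVectors = 2, dims = 3;
            // encode: vectors stored back to back, little-endian
            ByteBuffer encoded = ByteBuffer.allocate(numVectors * dims * Float.BYTES).order(ByteOrder.LITTLE_ENDIAN);
            encoded.asFloatBuffer().put(new float[] { 1f, 2f, 3f, 4f, 5f, 6f });
            // decode: one reusable dims-sized window per vector
            FloatBuffer floats = encoded.asFloatBuffer();
            float[] buffer = new float[dims];
            for (int i = 0; i < numVectors; i++) {
                floats.get(buffer);
                System.out.println(Arrays.toString(buffer)); // [1.0, 2.0, 3.0] then [4.0, 5.0, 6.0]
            }
        }
    }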
diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/MultiDenseVector.java b/server/src/main/java/org/elasticsearch/script/field/vectors/MultiDenseVector.java
new file mode 100644
index 0000000000000..7d948cf5a74fa
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/script/field/vectors/MultiDenseVector.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.script.field.vectors;
+
+import java.util.Iterator;
+
+public interface MultiDenseVector {
+
+    default void checkDimensions(int qvDims) {
+        checkDimensions(getDims(), qvDims);
+    }
+
+    float maxSimDotProduct(float[][] query);
+
+    float maxSimDotProduct(byte[][] query);
+
+    float maxSimInvHamming(byte[][] query);
+
+    Iterator<float[]> getVectors();
+
+    float[] getMagnitudes();
+
+    boolean isEmpty();
+
+    int getDims();
+
+    int size();
+
+    static void checkDimensions(int dvDims, int qvDims) {
+        if (dvDims != qvDims) {
+            throw new IllegalArgumentException(
+                "The query vector has a different number of dimensions [" + qvDims + "] than the document vectors [" + dvDims + "]."
+            );
+        }
+    }
+
+    private static String badQueryVectorType(Object queryVector) {
+        return "Cannot use vector [" + queryVector + "] with class [" + queryVector.getClass().getName() + "] as query vector";
+    }
+
+    MultiDenseVector EMPTY = new MultiDenseVector() {
+        public static final String MISSING_VECTOR_FIELD_MESSAGE = "Multi Dense vector value missing for a field,"
+            + " use isEmpty() to check for a missing vector value";
+
+        @Override
+        public Iterator<float[]> getVectors() {
+            throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE);
+        }
+
+        @Override
+        public float[] getMagnitudes() {
+            throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE);
+        }
+
+        @Override
+        public boolean isEmpty() {
+            return true;
+        }
+
+        @Override
+        public int getDims() {
+            throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE);
+        }
+
+        @Override
+        public float maxSimDotProduct(float[][] query) {
+            throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE);
+        }
+
+        @Override
+        public float maxSimDotProduct(byte[][] query) {
+            throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE);
+        }
+
+        @Override
+        public float maxSimInvHamming(byte[][] query) {
+            throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE);
+        }
+
+        @Override
+        public int size() {
+            return 0;
+        }
+    };
+}
diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/MultiDenseVectorDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/vectors/MultiDenseVectorDocValuesField.java
new file mode 100644
index 0000000000000..61ae4304683c8
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/script/field/vectors/MultiDenseVectorDocValuesField.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.script.field.vectors;
+
+import org.elasticsearch.index.mapper.vectors.MultiDenseVectorScriptDocValues;
+import org.elasticsearch.script.field.AbstractScriptFieldFactory;
+import org.elasticsearch.script.field.DocValuesScriptFieldFactory;
+import org.elasticsearch.script.field.Field;
+
+import java.util.Iterator;
+
+import static org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.ElementType;
+
+public abstract class MultiDenseVectorDocValuesField extends AbstractScriptFieldFactory<MultiDenseVector>
+    implements
+        Field<MultiDenseVector>,
+        DocValuesScriptFieldFactory,
+        MultiDenseVectorScriptDocValues.MultiDenseVectorSupplier {
+    protected final String name;
+    protected final ElementType elementType;
+
+    public MultiDenseVectorDocValuesField(String name, ElementType elementType) {
+        this.name = name;
+        this.elementType = elementType;
+    }
+
+    @Override
+    public String getName() {
+        return name;
+    }
+
+    public ElementType getElementType() {
+        return elementType;
+    }
+
+    /**
+     * Get the MultiDenseVector for a document if one exists, MultiDenseVector.EMPTY otherwise
+     */
+    public abstract MultiDenseVector get();
+
+    public abstract MultiDenseVector get(MultiDenseVector defaultValue);
+
+    public abstract MultiDenseVectorScriptDocValues toScriptDocValues();
+
+    // MultiDenseVector fields are single valued, so Iterable does not make sense.
+    @Override
+    public Iterator<MultiDenseVector> iterator() {
+        throw new UnsupportedOperationException("Cannot iterate over single valued multi_dense_vector field, use get() instead");
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/VectorIterator.java b/server/src/main/java/org/elasticsearch/script/field/vectors/VectorIterator.java
new file mode 100644
index 0000000000000..b8615ac877254
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/script/field/vectors/VectorIterator.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.script.field.vectors;
+
+import java.util.Iterator;
+
+public interface VectorIterator<E> extends Iterator<E> {
+    Iterator<E> copy();
+
+    void reset();
+
+    static VectorIterator<float[]> from(float[][] vectors) {
+        return new VectorIterator<>() {
+            private int i = 0;
+
+            @Override
+            public boolean hasNext() {
+                return i < vectors.length;
+            }
+
+            @Override
+            public float[] next() {
+                return vectors[i++];
+            }
+
+            @Override
+            public Iterator<float[]> copy() {
+                return from(vectors);
+            }
+
+            @Override
+            public void reset() {
+                i = 0;
+            }
+        };
+    }
+
+    static VectorIterator<byte[]> from(byte[][] vectors) {
+        return new VectorIterator<>() {
+            private int i = 0;
+
+            @Override
+            public boolean hasNext() {
+                return i < vectors.length;
+            }
+
+            @Override
+            public byte[] next() {
+                return vectors[i++];
+            }
+
+            @Override
+            public Iterator<byte[]> copy() {
+                return from(vectors);
+            }
+
+            @Override
+            public void reset() {
+                i = 0;
+            }
+        };
+    }
+}
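VectorIterator adds the two affordances the scoring loops depend on beyond a plain Iterator: reset(), so the same cursor can make one full pass per query vector, and copy(), for an independent iteration. A usage sketch around a local re-declaration of the interface (so it compiles standalone):

    import java.util.Iterator;

    public class VectorIteratorSketch {
        // local copy of the interface so the sketch stands alone
        interface VectorIterator<E> extends Iterator<E> {
            Iterator<E> copy();
            void reset();
        }

        static VectorIterator<float[]> from(float[][] vectors) {
            return new VectorIterator<>() {
                private int i = 0;
                public boolean hasNext() { return i < vectors.length; }
                public float[] next() { return vectors[i++]; }
                public Iterator<float[]> copy() { return from(vectors); }
                public void reset() { i = 0; }
            };
        }

        public static void main(String[] args) {
            VectorIterator<float[]> docVectors = from(new float[][] { { 1f }, { 2f } });
            float[][] query = { { 1f }, { 3f } };
            for (float[] q : query) {
                docVectors.reset(); // one full pass over the document's vectors per query vector
                float best = Float.NEGATIVE_INFINITY;
                while (docVectors.hasNext()) best = Math.max(best, q[0] * docVectors.next()[0]);
                System.out.println(best); // 2.0, then 6.0
            }
        }
    }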
diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java
index b8f50c6f9a62f..09e25350ad4fd 100644
--- a/server/src/main/java/org/elasticsearch/search/SearchModule.java
+++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java
@@ -231,7 +231,6 @@
 import org.elasticsearch.search.rank.RankDoc;
 import org.elasticsearch.search.rank.RankShardResult;
 import org.elasticsearch.search.rank.feature.RankFeatureDoc;
-import org.elasticsearch.search.rank.feature.RankFeatureShardPhase;
 import org.elasticsearch.search.rank.feature.RankFeatureShardResult;
 import org.elasticsearch.search.rescore.QueryRescorerBuilder;
 import org.elasticsearch.search.rescore.RescorerBuilder;
@@ -1299,10 +1298,6 @@ private void registerQuery(QuerySpec<?> spec) {
         );
     }
 
-    public RankFeatureShardPhase getRankFeatureShardPhase() {
-        return new RankFeatureShardPhase();
-    }
-
     public FetchPhase getFetchPhase() {
         return new FetchPhase(fetchSubPhases);
     }
diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java
index a11c4013a9c9b..e17709ed78318 100644
--- a/server/src/main/java/org/elasticsearch/search/SearchService.java
+++ b/server/src/main/java/org/elasticsearch/search/SearchService.java
@@ -73,7 +73,6 @@
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason;
-import org.elasticsearch.node.ResponseCollectorService;
 import org.elasticsearch.script.FieldScript;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.search.aggregations.AggregationInitializationException;
@@ -279,14 +278,11 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
 
     private final ScriptService scriptService;
 
-    private final ResponseCollectorService responseCollectorService;
-
     private final ExecutorSelector executorSelector;
 
     private final BigArrays bigArrays;
 
     private final FetchPhase fetchPhase;
-    private final RankFeatureShardPhase rankFeatureShardPhase;
 
     private volatile Executor searchExecutor;
     private volatile boolean enableQueryPhaseParallelCollection;
@@ -325,9 +321,7 @@ public SearchService(
         ThreadPool threadPool,
         ScriptService scriptService,
         BigArrays bigArrays,
-        RankFeatureShardPhase rankFeatureShardPhase,
         FetchPhase fetchPhase,
-        ResponseCollectorService responseCollectorService,
         CircuitBreakerService circuitBreakerService,
         ExecutorSelector executorSelector,
         Tracer tracer
@@ -337,9 +331,7 @@ public SearchService(
         this.clusterService = clusterService;
         this.indicesService = indicesService;
         this.scriptService = scriptService;
-        this.responseCollectorService = responseCollectorService;
         this.bigArrays = bigArrays;
-        this.rankFeatureShardPhase = rankFeatureShardPhase;
         this.fetchPhase = fetchPhase;
         this.multiBucketConsumerService = new MultiBucketConsumerService(
             clusterService,
@@ -751,9 +743,9 @@ public void executeRankFeaturePhase(RankFeatureShardRequest request, SearchShard
                 searchContext.rankFeatureResult().incRef();
                 return searchContext.rankFeatureResult();
             }
-            rankFeatureShardPhase.prepareForFetch(searchContext, request);
+            RankFeatureShardPhase.prepareForFetch(searchContext, request);
             fetchPhase.execute(searchContext, docIds, null);
-            rankFeatureShardPhase.processFetch(searchContext);
+            RankFeatureShardPhase.processFetch(searchContext);
             var rankFeatureResult = searchContext.rankFeatureResult();
             rankFeatureResult.incRef();
             return rankFeatureResult;
@@ -1538,10 +1530,6 @@ public int getOpenScrollContexts() {
         return openScrollContexts.get();
     }
 
-    public ResponseCollectorService getResponseCollectorService() {
-        return this.responseCollectorService;
-    }
-
     public long getDefaultKeepAliveInMillis() {
         return defaultKeepAlive;
     }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AdaptingAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/AdaptingAggregator.java
index b4d5512331b42..d08a76e51c6bd 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/AdaptingAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/AdaptingAggregator.java
@@ -10,6 +10,7 @@
 package org.elasticsearch.search.aggregations;
 
 import org.apache.lucene.search.ScoreMode;
+import org.elasticsearch.common.util.LongArray;
 import org.elasticsearch.core.CheckedFunction;
 import org.elasticsearch.search.profile.aggregation.InternalAggregationProfileTree;
 
@@ -98,10 +99,10 @@ public final void postCollection() throws IOException {
     }
 
     @Override
-    public final InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+    public final InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException {
         InternalAggregation[] delegateResults = delegate.buildAggregations(owningBucketOrds);
-        InternalAggregation[] result = new InternalAggregation[owningBucketOrds.length];
-        for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) {
+        InternalAggregation[] result = new InternalAggregation[Math.toIntExact(owningBucketOrds.size())];
+        for (int ordIdx = 0; ordIdx < result.length; ordIdx++) {
             result[ordIdx] = adapt(delegateResults[ordIdx]);
         }
         return result;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java
index 0d36469dddfdc..aa8d9fba554c1 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java
@@ -13,6 +13,8 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.LongArray;
 import org.elasticsearch.core.Releasable;
 import org.elasticsearch.search.aggregations.support.AggregationPath;
 import org.elasticsearch.search.sort.SortOrder;
@@ -142,7 +144,7 @@ public interface BucketComparator {
      * @return the results for each ordinal, in the same order as the array
      *         of ordinals
      */
-    public abstract InternalAggregation[] buildAggregations(long[] ordsToCollect) throws IOException;
+    public abstract InternalAggregation[] buildAggregations(LongArray ordsToCollect) throws IOException;
 
     /**
      * Release this aggregation and its sub-aggregations.
@@ -153,11 +155,11 @@ public interface BucketComparator {
     * Build the result of this aggregation if it is at the "top level"
     * of the aggregation tree. If, instead, it is a sub-aggregation of
     * another aggregation then the aggregation that contains it will call
-    * {@link #buildAggregations(long[])}.
+    * {@link #buildAggregations(LongArray)}.
     */
    public final InternalAggregation buildTopLevel() throws IOException {
        assert parent() == null;
-       return buildAggregations(new long[] { 0 })[0];
+       return buildAggregations(BigArrays.NON_RECYCLING_INSTANCE.newLongArray(1, true))[0];
    }
 
    /**
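Moving buildAggregations from long[] to LongArray lets owning-bucket ordinals live in big arrays that can be paged and accounted for by the circuit breaker instead of one unchecked heap allocation; implementations now size their result via Math.toIntExact(owningBucketOrds.size()) and read ordinals with get(i). A sketch of the adapted shape against a pared-down stand-in for org.elasticsearch.common.util.LongArray:

    public class BuildAggregationsSketch {
        // Pared-down stand-in for org.elasticsearch.common.util.LongArray.
        interface LongArray {
            long size();
            long get(long index);
        }

        record InternalAggregation(long owningOrd) {}

        // Mirrors the migrated pattern: size the result from the LongArray, index it positionally.
        static InternalAggregation[] buildAggregations(LongArray owningBucketOrds) {
            InternalAggregation[] results = new InternalAggregation[Math.toIntExact(owningBucketOrds.size())];
            for (int ordIdx = 0; ordIdx < results.length; ordIdx++) {
                results[ordIdx] = new InternalAggregation(owningBucketOrds.get(ordIdx));
            }
            return results;
        }

        public static void main(String[] args) {
            long[] backing = { 0L, 7L, 42L };
            LongArray ords = new LongArray() {
                public long size() { return backing.length; }
                public long get(long i) { return backing[(int) i]; }
            };
            System.out.println(buildAggregations(ords).length); // 3
        }
    }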
@@ -72,6 +76,7 @@ protected AggregatorBase( this.metadata = metadata; this.parent = parent; this.context = context; + this.breaker = context.breaker(); assert factories != null : "sub-factories provided to BucketAggregator must not be null, use AggragatorFactories.EMPTY instead"; this.subAggregators = factories.createSubAggregators(this, subAggregatorCardinality); context.addReleasable(this); @@ -327,6 +332,30 @@ protected final InternalAggregations buildEmptySubAggregations() { return InternalAggregations.from(aggs); } + /** + * Builds the aggregations array with the provided size and populates it using the provided function. + */ + protected final InternalAggregation[] buildAggregations(int size, CheckedIntFunction aggFunction) + throws IOException { + final InternalAggregation[] results = new InternalAggregation[size]; + for (int i = 0; i < results.length; i++) { + checkRealMemoryCB("internal_aggregation"); + results[i] = aggFunction.apply(i); + } + return results; + } + + /** + * This method calls the circuit breaker from time to time in order to give it a chance to check available + * memory in the parent breaker (which should be a real memory breaker) and break the execution if we are running out. + * To achieve that, we are passing 0 as the estimated bytes every 1024 calls. + */ + protected final void checkRealMemoryCB(String label) { + if ((++callCount & 0x3FF) == 0) { + breaker.addEstimateBytesAndMaybeBreak(0, label); + } + } + @Override public String toString() { return name; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java index 5ba39c640abdc..ef21e4103fd88 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java @@ -220,7 +220,7 @@ private List reducePipelineBuckets(AggregationReduceContext reduceContext, Pi return reducedBuckets; } - public abstract static class InternalBucket implements Bucket, Writeable { + public abstract static class InternalBucket implements Bucket { public Object getProperty(String containingAggName, List path) { if (path.isEmpty()) { @@ -248,4 +248,8 @@ public Object getProperty(String containingAggName, List path) { return aggregation.getProperty(path.subList(1, path.size())); } } + + /** An {@link InternalBucket} that implements the {@link Writeable} interface. Most implementations will want + * to use this one, except when specific logic is needed to write into the stream.
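The sampling trick behind checkRealMemoryCB is worth seeing in isolation: passing an estimate of 0 bytes does not account any memory, it merely gives the parent real-memory breaker a chance to re-measure the heap, and the bit mask limits that cost to one call in 1024. A self-contained sketch of the pattern; the Breaker interface below is a stand-in for the real CircuitBreaker, not an Elasticsearch API:

class SampledBreakerCheckSketch {
    /** Stand-in for org.elasticsearch.common.breaker.CircuitBreaker. */
    interface Breaker {
        void addEstimateBytesAndMaybeBreak(long bytes, String label);
    }

    private final Breaker breaker;
    private int callCount;

    SampledBreakerCheckSketch(Breaker breaker) {
        this.breaker = breaker;
    }

    void check(String label) {
        // 0x3FF masks the low 10 bits, so the condition is true once every
        // 1024 increments; the zero-byte estimate triggers the parent
        // breaker's heap check without reserving anything.
        if ((++callCount & 0x3FF) == 0) {
            breaker.addEstimateBytesAndMaybeBreak(0, label);
        }
    }
}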
*/ + public abstract static class InternalBucketWritable extends InternalBucket implements Writeable {} } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/NonCollectingAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/NonCollectingAggregator.java index 8accc6b15d820..a32211fd4d8fb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/NonCollectingAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/NonCollectingAggregator.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; @@ -39,11 +40,7 @@ public final LeafBucketCollector getLeafCollector(AggregationExecutionContext ag } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { - InternalAggregation[] results = new InternalAggregation[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - results[ordIdx] = buildEmptyAggregation(); - } - return results; + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { + return buildAggregations(Math.toIntExact(owningBucketOrds.size()), ordIdx -> buildEmptyAggregation()); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java index 231130c920349..44d76d31be0e7 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java @@ -20,6 +20,7 @@ import org.apache.lucene.util.packed.PackedInts; import org.apache.lucene.util.packed.PackedLongValues; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.LongHash; import org.elasticsearch.search.aggregations.AggregationExecutionContext; import org.elasticsearch.search.aggregations.Aggregator; @@ -146,7 +147,7 @@ public void postCollection() throws IOException { * Replay the wrapped collector, but only on a selection of buckets. */ @Override - public void prepareSelectedBuckets(long... selectedBuckets) throws IOException { + public void prepareSelectedBuckets(LongArray selectedBuckets) throws IOException { if (finished == false) { throw new IllegalStateException("Cannot replay yet, collection is not finished: postCollect() has not been called"); } @@ -154,9 +155,9 @@ public void prepareSelectedBuckets(long... selectedBuckets) throws IOException { throw new IllegalStateException("Already been replayed"); } - this.selectedBuckets = new LongHash(selectedBuckets.length, BigArrays.NON_RECYCLING_INSTANCE); - for (long ord : selectedBuckets) { - this.selectedBuckets.add(ord); + this.selectedBuckets = new LongHash(selectedBuckets.size(), BigArrays.NON_RECYCLING_INSTANCE); + for (long i = 0; i < selectedBuckets.size(); i++) { + this.selectedBuckets.add(selectedBuckets.get(i)); } boolean needsScores = scoreMode().needsScores(); @@ -232,21 +233,22 @@ private static void failInCaseOfBadScorer(String message) { * been collected directly. 
*/ @Override - public Aggregator wrap(final Aggregator in) { + public Aggregator wrap(final Aggregator in, BigArrays bigArrays) { return new WrappedAggregator(in) { @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { if (selectedBuckets == null) { throw new IllegalStateException("Collection has not been replayed yet."); } - long[] rebasedOrds = new long[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - rebasedOrds[ordIdx] = selectedBuckets.find(owningBucketOrds[ordIdx]); - if (rebasedOrds[ordIdx] == -1) { - throw new IllegalStateException("Cannot build for a bucket which has not been collected"); + try (LongArray rebasedOrds = bigArrays.newLongArray(owningBucketOrds.size())) { + for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) { + rebasedOrds.set(ordIdx, selectedBuckets.find(owningBucketOrds.get(ordIdx))); + if (rebasedOrds.get(ordIdx) == -1) { + throw new IllegalStateException("Cannot build for a bucket which has not been collected"); + } } + return in.buildAggregations(rebasedOrds); } - return in.buildAggregations(rebasedOrds); } }; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java index e6c26c4278807..e86c7127ec2f4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java @@ -9,8 +9,9 @@ package org.elasticsearch.search.aggregations.bucket; import org.apache.lucene.index.LeafReaderContext; -import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.util.IntArray; import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.core.Releasable; import org.elasticsearch.search.aggregations.AggregationErrors; import org.elasticsearch.search.aggregations.Aggregator; @@ -40,10 +41,9 @@ import java.util.function.ToLongFunction; public abstract class BucketsAggregator extends AggregatorBase { - private final CircuitBreaker breaker; + private LongArray docCounts; protected final DocCountProvider docCountProvider; - private int callCount; @SuppressWarnings("this-escape") public BucketsAggregator( @@ -55,7 +55,6 @@ public BucketsAggregator( Map metadata ) throws IOException { super(name, factories, aggCtx, parent, bucketCardinality, metadata); - breaker = aggCtx.breaker(); docCounts = bigArrays().newLongArray(1, true); docCountProvider = new DocCountProvider(); } @@ -81,7 +80,7 @@ public final void collectBucket(LeafBucketCollector subCollector, int doc, long grow(bucketOrd + 1); int docCount = docCountProvider.getDocCount(doc); if (docCounts.increment(bucketOrd, docCount) == docCount) { - updateCircuitBreaker("allocated_buckets"); + checkRealMemoryCB("allocated_buckets"); } subCollector.collect(doc, bucketOrd); } @@ -106,7 +105,6 @@ public final void rewriteBuckets(long newNumBuckets, LongUnaryOperator mergeMap) try { docCounts = bigArrays().newLongArray(newNumBuckets, true); success = true; - docCounts.fill(0, newNumBuckets, 0); for (long i = 0; i < oldDocCounts.size(); i++) { long docCount = oldDocCounts.get(i); @@ -155,26 +153,27 @@ public final long bucketDocCount(long bucketOrd) { /** * Hook to allow taking an 
action before building the sub agg results. */ - protected void prepareSubAggs(long[] ordsToCollect) throws IOException {} + protected void prepareSubAggs(LongArray ordsToCollect) throws IOException {} /** * Build the results of the sub-aggregations of the buckets at each of * the provided ordinals. *

* Most aggregations should probably use something like - * {@link #buildSubAggsForAllBuckets(Object[][], ToLongFunction, BiConsumer)} - * or {@link #buildAggregationsForVariableBuckets(long[], LongKeyedBucketOrds, BucketBuilderForVariable, ResultBuilderForVariable)} - * or {@link #buildAggregationsForFixedBucketCount(long[], int, BucketBuilderForFixedCount, Function)} - * or {@link #buildAggregationsForSingleBucket(long[], SingleBucketResultBuilder)} + * {@link #buildSubAggsForAllBuckets(ObjectArray, LongArray, BiConsumer)} + * or {@link #buildSubAggsForAllBuckets(ObjectArray, ToLongFunction, BiConsumer)} + * or {@link #buildAggregationsForVariableBuckets(LongArray, LongKeyedBucketOrds, BucketBuilderForVariable, ResultBuilderForVariable)} + * or {@link #buildAggregationsForFixedBucketCount(LongArray, int, BucketBuilderForFixedCount, Function)} + * or {@link #buildAggregationsForSingleBucket(LongArray, SingleBucketResultBuilder)} * instead of calling this directly. * @return the sub-aggregation results in the same order as the provided * array of ordinals */ - protected final IntFunction buildSubAggsForBuckets(long[] bucketOrdsToCollect) throws IOException { + protected final IntFunction buildSubAggsForBuckets(LongArray bucketOrdsToCollect) throws IOException { prepareSubAggs(bucketOrdsToCollect); InternalAggregation[][] aggregations = new InternalAggregation[subAggregators.length][]; for (int i = 0; i < subAggregators.length; i++) { - updateCircuitBreaker("building_sub_aggregation"); + checkRealMemoryCB("building_sub_aggregation"); aggregations[i] = subAggregators[i].buildAggregations(bucketOrdsToCollect); } return subAggsForBucketFunction(aggregations); @@ -195,35 +194,53 @@ public int size() { } /** - * Build the sub aggregation results for a list of buckets and set them on - * the buckets. This is usually used by aggregations that are selective - * in which bucket they build. They use some mechanism of selecting a list - * of buckets to build use this method to "finish" building the results. + * Similar to {@link #buildSubAggsForAllBuckets(ObjectArray, LongArray, BiConsumer)}, + * but it builds the bucket ordinals itself. This method usually requires buckets + * to contain their bucket ordinal. * @param buckets the buckets to finish building * @param bucketToOrd how to convert a bucket into an ordinal * @param setAggs how to set the sub-aggregation results on a bucket */ protected final void buildSubAggsForAllBuckets( - B[][] buckets, + ObjectArray buckets, ToLongFunction bucketToOrd, BiConsumer setAggs ) throws IOException { - int totalBucketOrdsToCollect = 0; - for (B[] bucketsForOneResult : buckets) { - totalBucketOrdsToCollect += bucketsForOneResult.length; + long totalBucketOrdsToCollect = 0; + for (long b = 0; b < buckets.size(); b++) { + totalBucketOrdsToCollect += buckets.get(b).length; } - long[] bucketOrdsToCollect = new long[totalBucketOrdsToCollect]; - int s = 0; - for (B[] bucketsForOneResult : buckets) { - for (B bucket : bucketsForOneResult) { - bucketOrdsToCollect[s++] = bucketToOrd.applyAsLong(bucket); + + try (LongArray bucketOrdsToCollect = bigArrays().newLongArray(totalBucketOrdsToCollect)) { + int s = 0; + for (long ord = 0; ord < buckets.size(); ord++) { + for (B bucket : buckets.get(ord)) { + bucketOrdsToCollect.set(s++, bucketToOrd.applyAsLong(bucket)); + } } + buildSubAggsForAllBuckets(buckets, bucketOrdsToCollect, setAggs); } + } + + /** + * Build the sub aggregation results for a list of buckets and set them on + * the buckets.
This is usually used by aggregations that are selective + * in which bucket they build. They use some mechanism of selecting a list + * of buckets to build, then use this method to "finish" building the results. + * @param buckets the buckets to finish building + * @param bucketOrdsToCollect bucket ordinals + * @param setAggs how to set the sub-aggregation results on a bucket + */ + protected final void buildSubAggsForAllBuckets( + ObjectArray buckets, + LongArray bucketOrdsToCollect, + BiConsumer setAggs + ) throws IOException { var results = buildSubAggsForBuckets(bucketOrdsToCollect); - s = 0; - for (B[] bucket : buckets) { - for (int b = 0; b < bucket.length; b++) { - setAggs.accept(bucket[b], results.apply(s++)); + int s = 0; + for (long ord = 0; ord < buckets.size(); ord++) { + for (B value : buckets.get(ord)) { + setAggs.accept(value, results.apply(s++)); } } } @@ -237,37 +254,37 @@ protected final void buildSubAggsForAllBuckets( * @param resultBuilder how to build a result from buckets */ protected final InternalAggregation[] buildAggregationsForFixedBucketCount( - long[] owningBucketOrds, + LongArray owningBucketOrds, int bucketsPerOwningBucketOrd, BucketBuilderForFixedCount bucketBuilder, Function, InternalAggregation> resultBuilder ) throws IOException { - int totalBuckets = owningBucketOrds.length * bucketsPerOwningBucketOrd; - long[] bucketOrdsToCollect = new long[totalBuckets]; - int bucketOrdIdx = 0; - for (long owningBucketOrd : owningBucketOrds) { - long ord = owningBucketOrd * bucketsPerOwningBucketOrd; - for (int offsetInOwningOrd = 0; offsetInOwningOrd < bucketsPerOwningBucketOrd; offsetInOwningOrd++) { - bucketOrdsToCollect[bucketOrdIdx++] = ord++; - } - } - bucketOrdIdx = 0; - var subAggregationResults = buildSubAggsForBuckets(bucketOrdsToCollect); - InternalAggregation[] results = new InternalAggregation[owningBucketOrds.length]; - for (int owningOrdIdx = 0; owningOrdIdx < owningBucketOrds.length; owningOrdIdx++) { - List buckets = new ArrayList<>(bucketsPerOwningBucketOrd); - for (int offsetInOwningOrd = 0; offsetInOwningOrd < bucketsPerOwningBucketOrd; offsetInOwningOrd++) { - buckets.add( - bucketBuilder.build( - offsetInOwningOrd, - bucketDocCount(bucketOrdsToCollect[bucketOrdIdx]), - subAggregationResults.apply(bucketOrdIdx++) - ) - ); + try (LongArray bucketOrdsToCollect = bigArrays().newLongArray(owningBucketOrds.size() * bucketsPerOwningBucketOrd)) { + final int[] bucketOrdIdx = new int[] { 0 }; + for (long i = 0; i < owningBucketOrds.size(); i++) { + long ord = owningBucketOrds.get(i) * bucketsPerOwningBucketOrd; + for (int offsetInOwningOrd = 0; offsetInOwningOrd < bucketsPerOwningBucketOrd; offsetInOwningOrd++) { + bucketOrdsToCollect.set(bucketOrdIdx[0]++, ord++); + } } - results[owningOrdIdx] = resultBuilder.apply(buckets); + bucketOrdIdx[0] = 0; + var subAggregationResults = buildSubAggsForBuckets(bucketOrdsToCollect); + + return buildAggregations(Math.toIntExact(owningBucketOrds.size()), ordIdx -> { + List buckets = new ArrayList<>(bucketsPerOwningBucketOrd); + for (int offsetInOwningOrd = 0; offsetInOwningOrd < bucketsPerOwningBucketOrd; offsetInOwningOrd++) { + checkRealMemoryCBForInternalBucket(); + buckets.add( + bucketBuilder.build( + offsetInOwningOrd, + bucketDocCount(bucketOrdsToCollect.get(bucketOrdIdx[0])), + subAggregationResults.apply(bucketOrdIdx[0]++) + ) + ); + } + return resultBuilder.apply(buckets); + }); } - return results; } @FunctionalInterface @@ -280,19 +297,20 @@ protected interface BucketBuilderForFixedCount { * @param
owningBucketOrds owning bucket ordinals for which to build the results * @param resultBuilder how to build a result from the sub aggregation results */ - protected final InternalAggregation[] buildAggregationsForSingleBucket(long[] owningBucketOrds, SingleBucketResultBuilder resultBuilder) - throws IOException { + protected final InternalAggregation[] buildAggregationsForSingleBucket( + LongArray owningBucketOrds, + SingleBucketResultBuilder resultBuilder + ) throws IOException { /* * It'd be entirely reasonable to call * `consumeBucketsAndMaybeBreak(owningBucketOrds.length)` * here but we don't because single bucket aggs never have. */ var subAggregationResults = buildSubAggsForBuckets(owningBucketOrds); - InternalAggregation[] results = new InternalAggregation[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - results[ordIdx] = resultBuilder.build(owningBucketOrds[ordIdx], subAggregationResults.apply(ordIdx)); - } - return results; + return buildAggregations( + Math.toIntExact(owningBucketOrds.size()), + ordIdx -> resultBuilder.build(owningBucketOrds.get(ordIdx), subAggregationResults.apply(ordIdx)) + ); } @FunctionalInterface @@ -307,54 +325,59 @@ protected interface SingleBucketResultBuilder { * @param bucketOrds hash of values to the bucket ordinal */ protected final InternalAggregation[] buildAggregationsForVariableBuckets( - long[] owningBucketOrds, + LongArray owningBucketOrds, LongKeyedBucketOrds bucketOrds, BucketBuilderForVariable bucketBuilder, ResultBuilderForVariable resultBuilder ) throws IOException { long totalOrdsToCollect = 0; - final int[] bucketsInOrd = new int[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - final long bucketCount = bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]); - bucketsInOrd[ordIdx] = (int) bucketCount; - totalOrdsToCollect += bucketCount; - } - if (totalOrdsToCollect > Integer.MAX_VALUE) { - // TODO: We should instrument this error. While it is correct for it to be a 400 class IllegalArgumentException, there is not - // much the user can do about that. If this occurs with any frequency, we should do something about it. 
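Even with big arrays, sub-aggregation results still land in an int-indexed Java array, which is why the rewritten method counts first and rejects totals past Integer.MAX_VALUE before allocating. A compact sketch of that count-guard-fill shape, assuming a BigArrays handle and using synthetic per-owner counts in place of the real ordsEnum:

import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.LongArray;

class CountGuardFillSketch {
    // The caller takes ownership of the returned array and must close it,
    // mirroring the try-with-resources blocks in the surrounding hunks.
    static LongArray collectOrds(BigArrays bigArrays, long[] perOwnerCounts) {
        long total = 0;
        for (long count : perOwnerCounts) {
            total += count;
        }
        if (total > Integer.MAX_VALUE) {
            throw new IllegalArgumentException(
                "Can't collect more than [" + Integer.MAX_VALUE + "] buckets but attempted [" + total + "]"
            );
        }
        LongArray ords = bigArrays.newLongArray(total);
        long next = 0;
        for (long count : perOwnerCounts) {
            for (long i = 0; i < count; i++) {
                ords.set(next++, i); // stand-in for ordsEnum.ord()
            }
        }
        return ords;
    }
}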
- throw new IllegalArgumentException( - "Can't collect more than [" + Integer.MAX_VALUE + "] buckets but attempted [" + totalOrdsToCollect + "]" - ); - } - long[] bucketOrdsToCollect = new long[(int) totalOrdsToCollect]; - int b = 0; - for (long owningBucketOrd : owningBucketOrds) { - LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd); - while (ordsEnum.next()) { - bucketOrdsToCollect[b++] = ordsEnum.ord(); + try (IntArray bucketsInOrd = bigArrays().newIntArray(owningBucketOrds.size())) { + for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) { + final long bucketCount = bucketOrds.bucketsInOrd(owningBucketOrds.get(ordIdx)); + bucketsInOrd.set(ordIdx, (int) bucketCount); + totalOrdsToCollect += bucketCount; } - } - var subAggregationResults = buildSubAggsForBuckets(bucketOrdsToCollect); - - InternalAggregation[] results = new InternalAggregation[owningBucketOrds.length]; - b = 0; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - List buckets = new ArrayList<>(bucketsInOrd[ordIdx]); - LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); - while (ordsEnum.next()) { - if (bucketOrdsToCollect[b] != ordsEnum.ord()) { - // If we hit this, something has gone horribly wrong and we need to investigate - throw AggregationErrors.iterationOrderChangedWithoutMutating( - bucketOrds.toString(), - ordsEnum.ord(), - bucketOrdsToCollect[b] - ); + if (totalOrdsToCollect > Integer.MAX_VALUE) { + // TODO: We should instrument this error. While it is correct for it to be a 400 class IllegalArgumentException, there is + // not + // much the user can do about that. If this occurs with any frequency, we should do something about it. + throw new IllegalArgumentException( + "Can't collect more than [" + Integer.MAX_VALUE + "] buckets but attempted [" + totalOrdsToCollect + "]" + ); + } + try (LongArray bucketOrdsToCollect = bigArrays().newLongArray(totalOrdsToCollect)) { + final int[] b = new int[] { 0 }; + for (long i = 0; i < owningBucketOrds.size(); i++) { + LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds.get(i)); + while (ordsEnum.next()) { + bucketOrdsToCollect.set(b[0]++, ordsEnum.ord()); + } } - buckets.add(bucketBuilder.build(ordsEnum.value(), bucketDocCount(ordsEnum.ord()), subAggregationResults.apply(b++))); + var subAggregationResults = buildSubAggsForBuckets(bucketOrdsToCollect); + + b[0] = 0; + return buildAggregations(Math.toIntExact(owningBucketOrds.size()), ordIdx -> { + final long owningBucketOrd = owningBucketOrds.get(ordIdx); + List buckets = new ArrayList<>(bucketsInOrd.get(ordIdx)); + LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd); + while (ordsEnum.next()) { + if (bucketOrdsToCollect.get(b[0]) != ordsEnum.ord()) { + // If we hit this, something has gone horribly wrong and we need to investigate + throw AggregationErrors.iterationOrderChangedWithoutMutating( + bucketOrds.toString(), + ordsEnum.ord(), + bucketOrdsToCollect.get(b[0]) + ); + } + checkRealMemoryCBForInternalBucket(); + buckets.add( + bucketBuilder.build(ordsEnum.value(), bucketDocCount(ordsEnum.ord()), subAggregationResults.apply(b[0]++)) + ); + } + return resultBuilder.build(owningBucketOrd, buckets); + }); } - results[ordIdx] = resultBuilder.build(owningBucketOrds[ordIdx], buckets); } - return results; } @FunctionalInterface @@ -412,14 +435,9 @@ protected void preGetSubLeafCollectors(LeafReaderContext ctx) throws IOException 
docCountProvider.setLeafReaderContext(ctx); } - /** - * This method calls the circuit breaker from time to time in order to give it a chance to check available - * memory in the parent breaker (Which should be a real memory breaker) and break the execution if we are running out. - * To achieve that, we are passing 0 as the estimated bytes every 1024 calls - */ - private void updateCircuitBreaker(String label) { - if ((++callCount & 0x3FF) == 0) { - breaker.addEstimateBytesAndMaybeBreak(0, label); - } + /** This method should be called whenever a new bucket object is created. It will check the real memory + * circuit breaker in a sampling fashion. See {@link #checkRealMemoryCB(String)}. */ + protected final void checkRealMemoryCBForInternalBucket() { + checkRealMemoryCB("internal_bucket"); + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferableBucketAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferableBucketAggregator.java index 84a15b6d1c0eb..64744b705e222 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferableBucketAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferableBucketAggregator.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.BucketCollector; @@ -65,7 +66,7 @@ protected void doPreCollection() throws IOException { } deferredAggregations.add(subAggregators[i]); deferredAggregationNames.add(subAggregators[i].name()); - subAggregators[i] = deferringCollector.wrap(subAggregators[i]); + subAggregators[i] = deferringCollector.wrap(subAggregators[i], bigArrays()); } else { collectors.add(subAggregators[i]); } @@ -87,7 +88,7 @@ protected DeferringBucketCollector deferringCollector() { /** * Build the {@link DeferringBucketCollector}. The default implementation * replays all hits against the buckets selected by - * {#link {@link DeferringBucketCollector#prepareSelectedBuckets(long...)}. + * {@link DeferringBucketCollector#prepareSelectedBuckets(LongArray)}.
*/ protected DeferringBucketCollector buildDeferringCollector() { return new BestBucketsDeferringCollector(topLevelQuery(), searcher(), descendsFromGlobalAggregator(parent())); @@ -107,7 +108,7 @@ protected boolean shouldDefer(Aggregator aggregator) { } @Override - protected final void prepareSubAggs(long[] bucketOrdsToCollect) throws IOException { + protected final void prepareSubAggs(LongArray bucketOrdsToCollect) throws IOException { if (deferringCollector != null) { deferringCollector.prepareSelectedBuckets(bucketOrdsToCollect); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java index 44cff2651e273..468fec29a9420 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java @@ -10,6 +10,8 @@ package org.elasticsearch.search.aggregations.bucket; import org.apache.lucene.search.ScoreMode; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.search.aggregations.AggregationExecutionContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.BucketCollector; @@ -37,13 +39,13 @@ public DeferringBucketCollector() {} /** * Replay the deferred hits on the selected buckets. */ - public abstract void prepareSelectedBuckets(long... selectedBuckets) throws IOException; + public abstract void prepareSelectedBuckets(LongArray selectedBuckets) throws IOException; /** * Wrap the provided aggregator so that it behaves (almost) as if it had * been collected directly. */ - public Aggregator wrap(final Aggregator in) { + public Aggregator wrap(final Aggregator in, BigArrays bigArrays) { return new WrappedAggregator(in); } @@ -80,7 +82,7 @@ public Aggregator subAggregator(String name) { } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { return in.buildAggregations(owningBucketOrds); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketsAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketsAggregation.java index 87ebec525a6fa..d39e90b44579e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketsAggregation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MultiBucketsAggregation.java @@ -12,7 +12,6 @@ import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.HasAggregations; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.xcontent.ToXContent; import java.util.List; @@ -24,7 +23,7 @@ public interface MultiBucketsAggregation extends Aggregation { * A bucket represents a criteria to which all documents that fall in it adhere to. It is also uniquely identified * by a key, and can potentially hold sub-aggregations computed over all documents in it. 
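Dropping ToXContent from the Bucket interface moves rendering decisions up to the owning aggregation, so flags like keyed and keyedBucket no longer have to be copied onto every bucket instance, as the InternalFilters hunks below show. A minimal sketch of that split, with a hypothetical Bucket record and field names:

import org.elasticsearch.xcontent.XContentBuilder;

import java.io.IOException;

class ParentDrivenRenderingSketch {
    record Bucket(String key, long docCount) {}

    // The parent aggregation owns the formatting flag and passes it in;
    // the bucket carries only data.
    static void bucketToXContent(XContentBuilder builder, Bucket bucket, boolean keyed) throws IOException {
        if (keyed) {
            builder.startObject(bucket.key());
        } else {
            builder.startObject();
            builder.field("key", bucket.key());
        }
        builder.field("doc_count", bucket.docCount());
        builder.endObject();
    }
}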
*/ - interface Bucket extends HasAggregations, ToXContent { + interface Bucket extends HasAggregations { /** * @return The key associated with the bucket */ diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java index 9ee15306ce636..441b30f872a35 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -35,6 +35,7 @@ import org.apache.lucene.util.RoaringDocIdSet; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Rounding; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.Strings; import org.elasticsearch.index.IndexSortConfig; @@ -184,50 +185,43 @@ protected void doPostCollection() throws IOException { } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { // Composite aggregator must be at the top of the aggregation tree - assert owningBucketOrds.length == 1 && owningBucketOrds[0] == 0L; + assert owningBucketOrds.size() == 1 && owningBucketOrds.get(0) == 0L; if (deferredCollectors != NO_OP_BUCKET_COLLECTOR) { // Replay all documents that contain at least one top bucket (collected during the first pass). runDeferredCollections(); } - int num = Math.min(size, (int) queue.size()); + final int num = Math.min(size, (int) queue.size()); final InternalComposite.InternalBucket[] buckets = new InternalComposite.InternalBucket[num]; - long[] bucketOrdsToCollect = new long[(int) queue.size()]; - for (int i = 0; i < queue.size(); i++) { - bucketOrdsToCollect[i] = i; - } - var subAggsForBuckets = buildSubAggsForBuckets(bucketOrdsToCollect); - while (queue.size() > 0) { - int slot = queue.pop(); - CompositeKey key = queue.toCompositeKey(slot); - InternalAggregations aggs = subAggsForBuckets.apply(slot); - long docCount = queue.getDocCount(slot); - buckets[(int) queue.size()] = new InternalComposite.InternalBucket( - sourceNames, - formats, - key, - reverseMuls, - missingOrders, - docCount, - aggs - ); + try (LongArray bucketOrdsToCollect = bigArrays().newLongArray(queue.size())) { + for (int i = 0; i < queue.size(); i++) { + bucketOrdsToCollect.set(i, i); + } + var subAggsForBuckets = buildSubAggsForBuckets(bucketOrdsToCollect); + while (queue.size() > 0) { + int slot = queue.pop(); + CompositeKey key = queue.toCompositeKey(slot); + InternalAggregations aggs = subAggsForBuckets.apply(slot); + long docCount = queue.getDocCount(slot); + buckets[(int) queue.size()] = new InternalComposite.InternalBucket(sourceNames, formats, key, docCount, aggs); + } + CompositeKey lastBucket = num > 0 ? buckets[num - 1].getRawKey() : null; + return new InternalAggregation[] { + new InternalComposite( + name, + size, + sourceNames, + formats, + Arrays.asList(buckets), + lastBucket, + reverseMuls, + missingOrders, + earlyTerminated, + metadata() + ) }; } - CompositeKey lastBucket = num > 0 ? 
buckets[num - 1].getRawKey() : null; - return new InternalAggregation[] { - new InternalComposite( - name, - size, - sourceNames, - formats, - Arrays.asList(buckets), - lastBucket, - reverseMuls, - missingOrders, - earlyTerminated, - metadata() - ) }; } @Override @@ -244,6 +238,7 @@ public InternalAggregation buildEmptyAggregation() { false, metadata() ); + } private void finishLeaf() { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java index 30c45ba46d9b7..1492e97e6a5a5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java @@ -19,7 +19,6 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; -import org.elasticsearch.search.aggregations.KeyComparable; import org.elasticsearch.search.aggregations.bucket.BucketReducer; import org.elasticsearch.search.aggregations.bucket.IteratorAndCurrent; import org.elasticsearch.search.aggregations.support.SamplingContext; @@ -103,7 +102,7 @@ public InternalComposite(StreamInput in) throws IOException { } this.reverseMuls = in.readIntArray(); this.missingOrders = in.readArray(MissingOrder::readFromStream, MissingOrder[]::new); - this.buckets = in.readCollectionAsList((input) -> new InternalBucket(input, sourceNames, formats, reverseMuls, missingOrders)); + this.buckets = in.readCollectionAsList((input) -> new InternalBucket(input, sourceNames, formats)); this.afterKey = in.readOptionalWriteable(CompositeKey::new); this.earlyTerminated = in.readBoolean(); } @@ -155,15 +154,7 @@ public InternalComposite create(List newBuckets) { @Override public InternalBucket createBucket(InternalAggregations aggregations, InternalBucket prototype) { - return new InternalBucket( - prototype.sourceNames, - prototype.formats, - prototype.key, - prototype.reverseMuls, - prototype.missingOrders, - prototype.docCount, - aggregations - ); + return new InternalBucket(prototype.sourceNames, prototype.formats, prototype.key, prototype.docCount, aggregations); } public int getSize() { @@ -206,7 +197,7 @@ protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceCont private final PriorityQueue> pq = new PriorityQueue<>(size) { @Override protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) { - return a.current().compareKey(b.current()) < 0; + return a.current().compareKey(b.current(), reverseMuls, missingOrders) < 0; } }; private boolean earlyTerminated = false; @@ -227,7 +218,7 @@ public InternalAggregation get() { final List result = new ArrayList<>(); while (pq.size() > 0) { IteratorAndCurrent top = pq.top(); - if (lastBucket != null && top.current().compareKey(lastBucket) != 0) { + if (lastBucket != null && top.current().compareKey(lastBucket, reverseMuls, missingOrders) != 0) { InternalBucket reduceBucket = reduceBucket(buckets, reduceContext); buckets.clear(); result.add(reduceBucket); @@ -306,7 +297,7 @@ private InternalBucket reduceBucket(List buckets, AggregationRed final var reducedFormats = reducer.getProto().formats; final long docCount = reducer.getDocCount(); final InternalAggregations aggs = reducer.getAggregations(); - return new InternalBucket(sourceNames, 
reducedFormats, reducer.getProto().key, reverseMuls, missingOrders, docCount, aggs); + return new InternalBucket(sourceNames, reducedFormats, reducer.getProto().key, docCount, aggs); } } @@ -329,16 +320,13 @@ public int hashCode() { return Objects.hash(super.hashCode(), size, buckets, afterKey, Arrays.hashCode(reverseMuls), Arrays.hashCode(missingOrders)); } - public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket + public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucketWritable implements - CompositeAggregation.Bucket, - KeyComparable { + CompositeAggregation.Bucket { private final CompositeKey key; private final long docCount; private final InternalAggregations aggregations; - private final transient int[] reverseMuls; - private final transient MissingOrder[] missingOrders; private final transient List sourceNames; private final transient List formats; @@ -346,32 +334,20 @@ public static class InternalBucket extends InternalMultiBucketAggregation.Intern List sourceNames, List formats, CompositeKey key, - int[] reverseMuls, - MissingOrder[] missingOrders, long docCount, InternalAggregations aggregations ) { this.key = key; this.docCount = docCount; this.aggregations = aggregations; - this.reverseMuls = reverseMuls; - this.missingOrders = missingOrders; this.sourceNames = sourceNames; this.formats = formats; } - InternalBucket( - StreamInput in, - List sourceNames, - List formats, - int[] reverseMuls, - MissingOrder[] missingOrders - ) throws IOException { + InternalBucket(StreamInput in, List sourceNames, List formats) throws IOException { this.key = new CompositeKey(in); this.docCount = in.readVLong(); this.aggregations = InternalAggregations.readFrom(in); - this.reverseMuls = reverseMuls; - this.missingOrders = missingOrders; this.sourceNames = sourceNames; this.formats = formats; } @@ -444,8 +420,7 @@ List getFormats() { return formats; } - @Override - public int compareKey(InternalBucket other) { + int compareKey(InternalBucket other, int[] reverseMuls, MissingOrder[] missingOrders) { for (int i = 0; i < key.size(); i++) { if (key.get(i) == null) { if (other.key.get(i) == null) { @@ -465,21 +440,11 @@ public int compareKey(InternalBucket other) { return 0; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - /** - * See {@link CompositeAggregation#bucketToXContent} - */ - throw new UnsupportedOperationException("not implemented"); - } - InternalBucket finalizeSampling(SamplingContext samplingContext) { return new InternalBucket( sourceNames, formats, key, - reverseMuls, - missingOrders, samplingContext.scaleUp(docCount), InternalAggregations.finalizeSampling(aggregations, samplingContext) ); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java index af4d60bf424a7..344b90b06c4f6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java @@ -13,6 +13,8 @@ import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.common.util.ObjectArray; import 
org.elasticsearch.core.Releasables; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationExecutionContext; @@ -108,70 +110,80 @@ private void collectOrdinal(long bucketOrdinal, int doc, LeafBucketCollector sub } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { - StringTerms.Bucket[][] topBucketsPerOrd = new StringTerms.Bucket[owningBucketOrds.length][]; - long[] otherDocCounts = new long[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize()); - - // as users can't control sort order, in practice we'll always sort by doc count descending - try ( - BucketPriorityQueue ordered = new BucketPriorityQueue<>( - size, - bigArrays(), - partiallyBuiltBucketComparator - ) - ) { - StringTerms.Bucket spare = null; - BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); - Supplier emptyBucketBuilder = () -> new StringTerms.Bucket(new BytesRef(), 0, null, false, 0, format); - while (ordsEnum.next()) { - long docCount = bucketDocCount(ordsEnum.ord()); - otherDocCounts[ordIdx] += docCount; - if (spare == null) { - spare = emptyBucketBuilder.get(); + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { + try ( + LongArray otherDocCounts = bigArrays().newLongArray(owningBucketOrds.size()); + ObjectArray topBucketsPerOrd = bigArrays().newObjectArray(owningBucketOrds.size()) + ) { + for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) { + int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize()); + + // as users can't control sort order, in practice we'll always sort by doc count descending + try ( + BucketPriorityQueue ordered = new BucketPriorityQueue<>( + size, + bigArrays(), + partiallyBuiltBucketComparator + ) + ) { + StringTerms.Bucket spare = null; + BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds.get(ordIdx)); + Supplier emptyBucketBuilder = () -> new StringTerms.Bucket( + new BytesRef(), + 0, + null, + false, + 0, + format + ); + while (ordsEnum.next()) { + long docCount = bucketDocCount(ordsEnum.ord()); + otherDocCounts.increment(ordIdx, docCount); + if (spare == null) { + checkRealMemoryCBForInternalBucket(); + spare = emptyBucketBuilder.get(); + } + ordsEnum.readValue(spare.getTermBytes()); + spare.setDocCount(docCount); + spare.setBucketOrd(ordsEnum.ord()); + spare = ordered.insertWithOverflow(spare); } - ordsEnum.readValue(spare.getTermBytes()); - spare.setDocCount(docCount); - spare.setBucketOrd(ordsEnum.ord()); - spare = ordered.insertWithOverflow(spare); - } - topBucketsPerOrd[ordIdx] = new StringTerms.Bucket[(int) ordered.size()]; - for (int i = (int) ordered.size() - 1; i >= 0; --i) { - topBucketsPerOrd[ordIdx][i] = ordered.pop(); - otherDocCounts[ordIdx] -= topBucketsPerOrd[ordIdx][i].getDocCount(); - topBucketsPerOrd[ordIdx][i].setTermBytes(BytesRef.deepCopyOf(topBucketsPerOrd[ordIdx][i].getTermBytes())); + topBucketsPerOrd.set(ordIdx, new StringTerms.Bucket[(int) ordered.size()]); + for (int i = (int) ordered.size() - 1; i >= 0; --i) { + topBucketsPerOrd.get(ordIdx)[i] = ordered.pop(); + otherDocCounts.increment(ordIdx, -topBucketsPerOrd.get(ordIdx)[i].getDocCount()); + topBucketsPerOrd.get(ordIdx)[i].setTermBytes(BytesRef.deepCopyOf(topBucketsPerOrd.get(ordIdx)[i].getTermBytes())); + } } } - } - 
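The loop above uses a pattern that recurs throughout this patch: a single mutable "spare" bucket is reused across hash entries, and insertWithOverflow hands back whichever object fell out of the top-N so it can be refilled, keeping allocations proportional to the shard size rather than to the number of buckets. The pattern in isolation, against Lucene's PriorityQueue:

import org.apache.lucene.util.PriorityQueue;

class SpareReuseSketch {
    static final class Entry {
        long docCount;
    }

    static PriorityQueue<Entry> topN(int size, long[] docCounts) {
        PriorityQueue<Entry> queue = new PriorityQueue<>(size) {
            @Override
            protected boolean lessThan(Entry a, Entry b) {
                return a.docCount < b.docCount; // smallest is evicted first
            }
        };
        Entry spare = null;
        for (long docCount : docCounts) {
            if (spare == null) {
                // allocate only when the queue kept the previous spare
                spare = new Entry();
            }
            spare.docCount = docCount;
            // returns the evicted entry (or the rejected spare) for reuse,
            // or null when the queue absorbed the object
            spare = queue.insertWithOverflow(spare);
        }
        return queue;
    }
}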
buildSubAggsForAllBuckets(topBucketsPerOrd, InternalTerms.Bucket::getBucketOrd, InternalTerms.Bucket::setAggregations); - InternalAggregation[] result = new InternalAggregation[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - final BucketOrder reduceOrder; - if (isKeyOrder(order) == false) { - reduceOrder = InternalOrder.key(true); - Arrays.sort(topBucketsPerOrd[ordIdx], reduceOrder.comparator()); - } else { - reduceOrder = order; - } - result[ordIdx] = new StringTerms( - name, - reduceOrder, - order, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), - metadata(), - format, - bucketCountThresholds.getShardSize(), - false, - otherDocCounts[ordIdx], - Arrays.asList(topBucketsPerOrd[ordIdx]), - null - ); + buildSubAggsForAllBuckets(topBucketsPerOrd, InternalTerms.Bucket::getBucketOrd, InternalTerms.Bucket::setAggregations); + + return buildAggregations(Math.toIntExact(owningBucketOrds.size()), ordIdx -> { + final BucketOrder reduceOrder; + if (isKeyOrder(order) == false) { + reduceOrder = InternalOrder.key(true); + Arrays.sort(topBucketsPerOrd.get(ordIdx), reduceOrder.comparator()); + } else { + reduceOrder = order; + } + return new StringTerms( + name, + reduceOrder, + order, + bucketCountThresholds.getRequiredSize(), + bucketCountThresholds.getMinDocCount(), + metadata(), + format, + bucketCountThresholds.getShardSize(), + false, + otherDocCounts.get(ordIdx), + Arrays.asList(topBucketsPerOrd.get(ordIdx)), + null + ); + }); } - return result; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java index fede97c7fddee..a9ec0ba878ec0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AggregationExecutionContext; @@ -208,21 +209,15 @@ List filters() { } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { return buildAggregationsForFixedBucketCount( owningBucketOrds, filters.size() + (otherBucketKey == null ? 
0 : 1), (offsetInOwningOrd, docCount, subAggregationResults) -> { if (offsetInOwningOrd < filters.size()) { - return new InternalFilters.InternalBucket( - filters.get(offsetInOwningOrd).key(), - docCount, - subAggregationResults, - keyed, - keyedBucket - ); + return new InternalFilters.InternalBucket(filters.get(offsetInOwningOrd).key(), docCount, subAggregationResults); } - return new InternalFilters.InternalBucket(otherBucketKey, docCount, subAggregationResults, keyed, keyedBucket); + return new InternalFilters.InternalBucket(otherBucketKey, docCount, subAggregationResults); }, buckets -> new InternalFilters(name, buckets, keyed, keyedBucket, metadata()) ); @@ -233,12 +228,12 @@ public InternalAggregation buildEmptyAggregation() { InternalAggregations subAggs = buildEmptySubAggregations(); List buckets = new ArrayList<>(filters.size() + (otherBucketKey == null ? 0 : 1)); for (QueryToFilterAdapter filter : filters) { - InternalFilters.InternalBucket bucket = new InternalFilters.InternalBucket(filter.key(), 0, subAggs, keyed, keyedBucket); + InternalFilters.InternalBucket bucket = new InternalFilters.InternalBucket(filter.key(), 0, subAggs); buckets.add(bucket); } if (otherBucketKey != null) { - InternalFilters.InternalBucket bucket = new InternalFilters.InternalBucket(otherBucketKey, 0, subAggs, keyed, keyedBucket); + InternalFilters.InternalBucket bucket = new InternalFilters.InternalBucket(otherBucketKey, 0, subAggs); buckets.add(bucket); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java index a5dfb0d8efafa..19cd0df9c7122 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java @@ -30,28 +30,22 @@ import java.util.Objects; public class InternalFilters extends InternalMultiBucketAggregation implements Filters { - public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket implements Filters.Bucket { + public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucketWritable implements Filters.Bucket { - private final boolean keyed; - private final boolean keyedBucket; private final String key; private long docCount; InternalAggregations aggregations; - public InternalBucket(String key, long docCount, InternalAggregations aggregations, boolean keyed, boolean keyedBucket) { + public InternalBucket(String key, long docCount, InternalAggregations aggregations) { this.key = key; - this.keyedBucket = keyedBucket; this.docCount = docCount; this.aggregations = aggregations; - this.keyed = keyed; } /** * Read from a stream. 
*/ - public InternalBucket(StreamInput in, boolean keyed, boolean keyedBucket) throws IOException { - this.keyed = keyed; - this.keyedBucket = keyedBucket; + public InternalBucket(StreamInput in) throws IOException { key = in.readOptionalString(); docCount = in.readVLong(); aggregations = InternalAggregations.readFrom(in); @@ -84,8 +78,7 @@ public InternalAggregations getAggregations() { return aggregations; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + private void bucketToXContent(XContentBuilder builder, Params params, boolean keyed, boolean keyedBucket) throws IOException { if (keyed && keyedBucket) { builder.startObject(key); } else { @@ -97,7 +90,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount); aggregations.toXContentInternal(builder, params); builder.endObject(); - return builder; } @Override @@ -110,24 +102,20 @@ public boolean equals(Object other) { } InternalBucket that = (InternalBucket) other; return Objects.equals(key, that.key) - && Objects.equals(keyed, that.keyed) - && Objects.equals(keyedBucket, that.keyedBucket) && Objects.equals(docCount, that.docCount) && Objects.equals(aggregations, that.aggregations); } @Override public int hashCode() { - return Objects.hash(getClass(), key, keyed, keyedBucket, docCount, aggregations); + return Objects.hash(getClass(), key, docCount, aggregations); } InternalBucket finalizeSampling(SamplingContext samplingContext) { return new InternalBucket( key, samplingContext.scaleUp(docCount), - InternalAggregations.finalizeSampling(aggregations, samplingContext), - keyed, - keyedBucket + InternalAggregations.finalizeSampling(aggregations, samplingContext) ); } } @@ -155,7 +143,7 @@ public InternalFilters(StreamInput in) throws IOException { int size = in.readVInt(); List buckets = new ArrayList<>(size); for (int i = 0; i < size; i++) { - buckets.add(new InternalBucket(in, keyed, keyedBucket)); + buckets.add(new InternalBucket(in)); } this.buckets = buckets; this.bucketMap = null; @@ -182,7 +170,7 @@ public InternalFilters create(List buckets) { @Override public InternalBucket createBucket(InternalAggregations aggregations, InternalBucket prototype) { - return new InternalBucket(prototype.key, prototype.docCount, aggregations, prototype.keyed, keyedBucket); + return new InternalBucket(prototype.key, prototype.docCount, aggregations); } @Override @@ -211,7 +199,7 @@ protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceCont ) { @Override protected InternalBucket createBucket(InternalBucket proto, long docCount, InternalAggregations aggregations) { - return new InternalBucket(proto.key, docCount, aggregations, proto.keyed, proto.keyedBucket); + return new InternalBucket(proto.key, docCount, aggregations); } }; @@ -252,7 +240,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th builder.startArray(CommonFields.BUCKETS.getPreferredName()); } for (InternalBucket bucket : buckets) { - bucket.toXContent(builder, params); + bucket.bucketToXContent(builder, params, keyed, keyedBucket); } if (keyed && keyedBucket) { builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/BucketPriorityQueue.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/BucketPriorityQueue.java index cc677605c4528..85c79df42a714 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/BucketPriorityQueue.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/BucketPriorityQueue.java @@ -11,17 +11,24 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArrayPriorityQueue; -class BucketPriorityQueue extends ObjectArrayPriorityQueue { +import java.util.function.Function; - BucketPriorityQueue(int size, BigArrays bigArrays) { +class BucketPriorityQueue extends ObjectArrayPriorityQueue { + + private final Function bucketSupplier; + + BucketPriorityQueue(int size, BigArrays bigArrays, Function bucketSupplier) { super(size, bigArrays); + this.bucketSupplier = bucketSupplier; } @Override - protected boolean lessThan(InternalGeoGridBucket o1, InternalGeoGridBucket o2) { - int cmp = Long.compare(o2.getDocCount(), o1.getDocCount()); + protected boolean lessThan(A o1, A o2) { + final B b1 = bucketSupplier.apply(o1); + final B b2 = bucketSupplier.apply(o2); + int cmp = Long.compare(b2.getDocCount(), b1.getDocCount()); if (cmp == 0) { - cmp = o2.compareTo(o1); + cmp = b2.compareTo(b1); if (cmp == 0) { cmp = System.identityHashCode(o2) - System.identityHashCode(o1); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java index cde26bb2214ed..b84dff6e73e0b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java @@ -12,6 +12,9 @@ import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.ScoreMode; +import org.elasticsearch.common.util.IntArray; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.core.Releasables; import org.elasticsearch.search.aggregations.AggregationExecutionContext; import org.elasticsearch.search.aggregations.Aggregator; @@ -21,6 +24,7 @@ import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; +import org.elasticsearch.search.aggregations.bucket.terms.BucketAndOrd; import org.elasticsearch.search.aggregations.bucket.terms.LongKeyedBucketOrds; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; @@ -132,39 +136,58 @@ public void collect(int doc, long owningBucketOrd) throws IOException { protected abstract InternalGeoGridBucket newEmptyBucket(); @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { - InternalGeoGridBucket[][] topBucketsPerOrd = new InternalGeoGridBucket[owningBucketOrds.length][]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]), shardSize); - - try (BucketPriorityQueue ordered = new BucketPriorityQueue<>(size, bigArrays())) { - InternalGeoGridBucket spare = null; - LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); - while (ordsEnum.next()) { - if (spare == null) { - spare = newEmptyBucket(); - } - - // need a special function to 
keep the source bucket - // up-to-date so it can get the appropriate key - spare.hashAsLong = ordsEnum.value(); - spare.docCount = bucketDocCount(ordsEnum.ord()); - spare.bucketOrd = ordsEnum.ord(); - spare = ordered.insertWithOverflow(spare); + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { + + try (ObjectArray topBucketsPerOrd = bigArrays().newObjectArray(owningBucketOrds.size())) { + try (IntArray bucketsSizePerOrd = bigArrays().newIntArray(owningBucketOrds.size())) { + long ordsToCollect = 0; + for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) { + int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrds.get(ordIdx)), shardSize); + ordsToCollect += size; + bucketsSizePerOrd.set(ordIdx, size); } - - topBucketsPerOrd[ordIdx] = new InternalGeoGridBucket[(int) ordered.size()]; - for (int i = (int) ordered.size() - 1; i >= 0; --i) { - topBucketsPerOrd[ordIdx][i] = ordered.pop(); + try (LongArray ordsArray = bigArrays().newLongArray(ordsToCollect)) { + long ordsCollected = 0; + for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) { + try ( + BucketPriorityQueue, InternalGeoGridBucket> ordered = + new BucketPriorityQueue<>(bucketsSizePerOrd.get(ordIdx), bigArrays(), b -> b.bucket) + ) { + BucketAndOrd spare = null; + LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds.get(ordIdx)); + while (ordsEnum.next()) { + if (spare == null) { + checkRealMemoryCBForInternalBucket(); + spare = new BucketAndOrd<>(newEmptyBucket()); + } + + // need a special function to keep the source bucket + // up-to-date so it can get the appropriate key + spare.bucket.hashAsLong = ordsEnum.value(); + spare.bucket.docCount = bucketDocCount(ordsEnum.ord()); + spare.ord = ordsEnum.ord(); + spare = ordered.insertWithOverflow(spare); + } + final int orderedSize = (int) ordered.size(); + final InternalGeoGridBucket[] buckets = new InternalGeoGridBucket[orderedSize]; + for (int i = orderedSize - 1; i >= 0; --i) { + BucketAndOrd bucketBucketAndOrd = ordered.pop(); + buckets[i] = bucketBucketAndOrd.bucket; + ordsArray.set(ordsCollected + i, bucketBucketAndOrd.ord); + } + topBucketsPerOrd.set(ordIdx, buckets); + ordsCollected += orderedSize; + } + } + assert ordsCollected == ordsArray.size(); + buildSubAggsForAllBuckets(topBucketsPerOrd, ordsArray, (b, aggs) -> b.aggregations = aggs); } } + return buildAggregations( + Math.toIntExact(owningBucketOrds.size()), + ordIdx -> buildAggregation(name, requiredSize, Arrays.asList(topBucketsPerOrd.get(ordIdx)), metadata()) + ); } - buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs); - InternalAggregation[] results = new InternalAggregation[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - results[ordIdx] = buildAggregation(name, requiredSize, Arrays.asList(topBucketsPerOrd[ordIdx]), metadata()); - } - return results; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java index d56625ab28c51..343c92b353884 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java @@ -27,6 +27,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import 
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java
index d56625ab28c51..343c92b353884 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java
@@ -27,6 +27,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.function.Function;
 
 import static java.util.Collections.unmodifiableList;
 
@@ -106,7 +107,13 @@ public InternalAggregation get() {
                 final int size = Math.toIntExact(
                     context.isFinalReduce() == false ? bucketsReducer.size() : Math.min(requiredSize, bucketsReducer.size())
                 );
-                try (BucketPriorityQueue ordered = new BucketPriorityQueue<>(size, context.bigArrays())) {
+                try (
+                    BucketPriorityQueue<InternalGeoGridBucket, InternalGeoGridBucket> ordered = new BucketPriorityQueue<>(
+                        size,
+                        context.bigArrays(),
+                        Function.identity()
+                    )
+                ) {
                     bucketsReducer.forEach(entry -> {
                         InternalGeoGridBucket bucket = createBucket(entry.key, entry.value.getDocCount(), entry.value.getAggregations());
                         ordered.insertWithOverflow(bucket);
@@ -152,7 +159,7 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) {
     public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
         builder.startArray(CommonFields.BUCKETS.getPreferredName());
         for (InternalGeoGridBucket bucket : buckets) {
-            bucket.toXContent(builder, params);
+            bucket.bucketToXContent(builder, params);
         }
         builder.endArray();
         return builder;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java
index c972845468c2b..8884a412bcf41 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java
@@ -13,12 +13,13 @@
 import org.elasticsearch.search.aggregations.Aggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
+import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContentBuilder;
 
 import java.io.IOException;
 import java.util.Objects;
 
-public abstract class InternalGeoGridBucket extends InternalMultiBucketAggregation.InternalBucket
+public abstract class InternalGeoGridBucket extends InternalMultiBucketAggregation.InternalBucketWritable
     implements
         GeoGrid.Bucket,
         Comparable<InternalGeoGridBucket> {
@@ -27,8 +28,6 @@ public abstract class InternalGeoGridBucket extends InternalMultiBucketAggregati
     protected long docCount;
     protected InternalAggregations aggregations;
 
-    long bucketOrd;
-
     public InternalGeoGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) {
         this.docCount = docCount;
         this.aggregations = aggregations;
@@ -76,14 +75,12 @@ public int compareTo(InternalGeoGridBucket other) {
         return 0;
     }
 
-    @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+    final void bucketToXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
         builder.startObject();
         builder.field(Aggregation.CommonFields.KEY.getPreferredName(), getKeyAsString());
         builder.field(Aggregation.CommonFields.DOC_COUNT.getPreferredName(), docCount);
         aggregations.toXContentInternal(builder, params);
         builder.endObject();
-        return builder;
     }
 
     @Override
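
[Editor's note] The files that follow mostly repeat one mechanical migration: buildAggregations(long[]) becomes buildAggregations(LongArray), trading a plain heap array for a BigArrays-backed one that can be tracked and released. The idiom in isolation, as a runnable sketch against the real org.elasticsearch.common.util classes (NON_RECYCLING_INSTANCE is the non-pooled BigArrays instance; the values are made up):

import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.LongArray;

public class LongArrayMigrationDemo {
    public static void main(String[] args) {
        // was: long[] ords = new long[3];
        try (LongArray ords = BigArrays.NON_RECYCLING_INSTANCE.newLongArray(3)) {
            for (long i = 0; i < ords.size(); i++) { // was: ords.length
                ords.set(i, i * 10);                 // was: ords[i] = i * 10
            }
            System.out.println(ords.get(2));         // was: ords[2]; prints 20
        } // LongArray is Releasable, so callers must close what they allocate
    }
}
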
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java
index b5d3485e72f82..b83001c34377e 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java
@@ -14,6 +14,7 @@
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Scorable;
 import org.apache.lucene.search.Weight;
+import org.elasticsearch.common.util.LongArray;
 import org.elasticsearch.search.aggregations.AggregationExecutionContext;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.CardinalityUpperBound;
@@ -62,8 +63,8 @@ public void setScorer(Scorable scorer) throws IOException {
     }
 
     @Override
-    public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
-        assert owningBucketOrds.length == 1 && owningBucketOrds[0] == 0 : "global aggregator can only be a top level aggregator";
+    public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException {
+        assert owningBucketOrds.size() == 1 && owningBucketOrds.get(0) == 0 : "global aggregator can only be a top level aggregator";
         return buildAggregationsForSingleBucket(
             owningBucketOrds,
             (owningBucketOrd, subAggregationResults) -> new InternalGlobal(
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramAggregator.java
index b81d8b002b6b2..5ea8cd035e580 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramAggregator.java
@@ -10,6 +10,7 @@
 package org.elasticsearch.search.aggregations.bucket.histogram;
 
 import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.common.util.LongArray;
 import org.elasticsearch.core.Releasables;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.Aggregator;
@@ -79,11 +80,11 @@ public AbstractHistogramAggregator(
     }
 
     @Override
-    public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+    public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException {
         return buildAggregationsForVariableBuckets(owningBucketOrds, bucketOrds, (bucketValue, docCount, subAggregationResults) -> {
             double roundKey = Double.longBitsToDouble(bucketValue);
             double key = roundKey * interval + offset;
-            return new InternalHistogram.Bucket(key, docCount, keyed, formatter, subAggregationResults);
+            return new InternalHistogram.Bucket(key, docCount, formatter, subAggregationResults);
         }, (owningBucketOrd, buckets) -> {
             // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order
             CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator());
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramBucket.java
index 16a83ed04e524..7806d8cd8efe2 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramBucket.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramBucket.java
@@ -16,7 +16,7 @@
 /**
  * A bucket in the histogram where documents fall in
  */
-public abstract class AbstractHistogramBucket extends InternalMultiBucketAggregation.InternalBucket {
+public abstract class AbstractHistogramBucket extends InternalMultiBucketAggregation.InternalBucketWritable { protected final long docCount; protected final InternalAggregations aggregations; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 86c320d8dc319..1eb0226ad8c8c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -17,6 +17,7 @@ import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.Rounding.DateTimeUnit; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasables; @@ -337,9 +338,9 @@ private void addRoundedValue(long rounded, int doc, long owningBucketOrd, LeafBu } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { return buildAggregationsForVariableBuckets(owningBucketOrds, bucketOrds, (bucketValue, docCount, subAggregationResults) -> { - return new InternalDateHistogram.Bucket(bucketValue, docCount, keyed, formatter, subAggregationResults); + return new InternalDateHistogram.Bucket(bucketValue, docCount, formatter, subAggregationResults); }, (owningBucketOrd, buckets) -> { // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator()); @@ -465,7 +466,6 @@ protected InternalAggregation adapt(InternalAggregation delegateResult) { new InternalDateHistogram.Bucket( rangeBucket.getFrom().toInstant().toEpochMilli(), rangeBucket.getDocCount(), - keyed, format, rangeBucket.getAggregations() ) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregator.java index 2bfd85e5fe03a..5a104055d9aec 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregator.java @@ -12,6 +12,7 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.Rounding; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.fielddata.FieldData; @@ -163,14 +164,13 @@ public void collect(int doc, long owningBucketOrd) throws IOException { } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { return buildAggregationsForVariableBuckets( owningBucketOrds, bucketOrds, (bucketValue, docCount, subAggregationResults) -> new InternalDateHistogram.Bucket( bucketValue, docCount, - keyed, formatter, subAggregationResults ), diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 564abff2a9f97..d2badbeec4622 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -53,19 +53,17 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< public static class Bucket extends AbstractHistogramBucket implements KeyComparable { final long key; - private final transient boolean keyed; - public Bucket(long key, long docCount, boolean keyed, DocValueFormat format, InternalAggregations aggregations) { + public Bucket(long key, long docCount, DocValueFormat format, InternalAggregations aggregations) { super(docCount, aggregations, format); - this.keyed = keyed; this.key = key; } /** * Read from a stream. */ - public static Bucket readFrom(StreamInput in, boolean keyed, DocValueFormat format) throws IOException { - return new Bucket(in.readLong(), in.readVLong(), keyed, format, InternalAggregations.readFrom(in)); + public static Bucket readFrom(StreamInput in, DocValueFormat format) throws IOException { + return new Bucket(in.readLong(), in.readVLong(), format, InternalAggregations.readFrom(in)); } @Override @@ -101,8 +99,7 @@ public Object getKey() { return Instant.ofEpochMilli(key).atZone(ZoneOffset.UTC); } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + private void bucketToXContent(XContentBuilder builder, Params params, boolean keyed) throws IOException { String keyAsString = format.format(key).toString(); if (keyed) { builder.startObject(keyAsString); @@ -116,7 +113,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount); aggregations.toXContentInternal(builder, params); builder.endObject(); - return builder; } @Override @@ -124,15 +120,10 @@ public int compareKey(Bucket other) { return Long.compare(key, other.key); } - public boolean getKeyed() { - return keyed; - } - Bucket finalizeSampling(SamplingContext samplingContext) { return new Bucket( key, samplingContext.scaleUp(docCount), - keyed, format, InternalAggregations.finalizeSampling(aggregations, samplingContext) ); @@ -237,7 +228,7 @@ public InternalDateHistogram(StreamInput in) throws IOException { } else { downsampledResultsOffset = false; } - buckets = in.readCollectionAsList(stream -> Bucket.readFrom(stream, keyed, format)); + buckets = in.readCollectionAsList(stream -> Bucket.readFrom(stream, format)); // we changed the order format in 8.13 for partial reduce, therefore we need to order them to perform merge sort if (in.getTransportVersion().between(TransportVersions.V_8_13_0, TransportVersions.V_8_14_0)) { // list is mutable by #readCollectionAsList contract @@ -301,7 +292,7 @@ public InternalDateHistogram create(List buckets) { @Override public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { - return new Bucket(prototype.key, prototype.docCount, prototype.keyed, prototype.format, aggregations); + return new Bucket(prototype.key, prototype.docCount, prototype.format, aggregations); } private List reduceBuckets(final PriorityQueue> pq, AggregationReduceContext reduceContext) { @@ -398,7 +389,7 @@ public void accept(long 
key) { reduceContext.consumeBucketsAndMaybeBreak(size); size = 0; } - iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs)); + iter.add(new InternalDateHistogram.Bucket(key, 0, format, reducedEmptySubAggs)); } }); } @@ -546,7 +537,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th builder.startArray(CommonFields.BUCKETS.getPreferredName()); } for (Bucket bucket : buckets) { - bucket.toXContent(builder, params); + bucket.bucketToXContent(builder, params, keyed); } if (keyed) { builder.endObject(); @@ -603,7 +594,7 @@ public InternalAggregation createAggregation(List { final double key; - private final transient boolean keyed; - public Bucket(double key, long docCount, boolean keyed, DocValueFormat format, InternalAggregations aggregations) { + public Bucket(double key, long docCount, DocValueFormat format, InternalAggregations aggregations) { super(docCount, aggregations, format); - this.keyed = keyed; this.key = key; } /** * Read from a stream. */ - public static Bucket readFrom(StreamInput in, boolean keyed, DocValueFormat format) throws IOException { - return new Bucket(in.readDouble(), in.readVLong(), keyed, format, InternalAggregations.readFrom(in)); + public static Bucket readFrom(StreamInput in, DocValueFormat format) throws IOException { + return new Bucket(in.readDouble(), in.readVLong(), format, InternalAggregations.readFrom(in)); } @Override @@ -96,8 +94,7 @@ public Object getKey() { return key; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + private void bucketToXContent(XContentBuilder builder, Params params, boolean keyed) throws IOException { String keyAsString = format.format(key).toString(); if (keyed) { builder.startObject(keyAsString); @@ -111,7 +108,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount); aggregations.toXContentInternal(builder, params); builder.endObject(); - return builder; } @Override @@ -119,15 +115,10 @@ public int compareKey(Bucket other) { return Double.compare(key, other.key); } - public boolean getKeyed() { - return keyed; - } - Bucket finalizeSampling(SamplingContext samplingContext) { return new Bucket( key, samplingContext.scaleUp(docCount), - keyed, format, InternalAggregations.finalizeSampling(aggregations, samplingContext) ); @@ -220,7 +211,7 @@ public InternalHistogram(StreamInput in) throws IOException { } format = in.readNamedWriteable(DocValueFormat.class); keyed = in.readBoolean(); - buckets = in.readCollectionAsList(stream -> Bucket.readFrom(stream, keyed, format)); + buckets = in.readCollectionAsList(stream -> Bucket.readFrom(stream, format)); // we changed the order format in 8.13 for partial reduce, therefore we need to order them to perform merge sort if (in.getTransportVersion().between(TransportVersions.V_8_13_0, TransportVersions.V_8_14_0)) { // list is mutable by #readCollectionAsList contract @@ -265,7 +256,7 @@ public InternalHistogram create(List buckets) { @Override public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { - return new Bucket(prototype.key, prototype.docCount, prototype.keyed, prototype.format, aggregations); + return new Bucket(prototype.key, prototype.docCount, prototype.format, aggregations); } private List reduceBuckets(PriorityQueue> pq, AggregationReduceContext reduceContext) { @@ -373,7 +364,7 @@ public void accept(double key) { 
reduceContext.consumeBucketsAndMaybeBreak(size); size = 0; } - iter.add(new Bucket(key, 0, keyed, format, reducedEmptySubAggs)); + iter.add(new Bucket(key, 0, format, reducedEmptySubAggs)); } }); } @@ -478,7 +469,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th builder.startArray(CommonFields.BUCKETS.getPreferredName()); } for (Bucket bucket : buckets) { - bucket.toXContent(builder, params); + bucket.bucketToXContent(builder, params, keyed); } if (keyed) { builder.endObject(); @@ -508,7 +499,7 @@ public InternalAggregation createAggregation(List buckets = new ArrayList<>(numClusters); - for (int bucketOrd = 0; bucketOrd < numClusters; bucketOrd++) { - buckets.add(collector.buildBucket(bucketOrd, subAggregationResults.apply(bucketOrd))); - } + List buckets = new ArrayList<>(numClusters); + for (int bucketOrd = 0; bucketOrd < numClusters; bucketOrd++) { + buckets.add(collector.buildBucket(bucketOrd, subAggregationResults.apply(bucketOrd))); + } - Function, InternalAggregation> resultBuilder = bucketsToFormat -> { - // The contract of the histogram aggregation is that shards must return - // buckets ordered by centroid in ascending order - CollectionUtil.introSort(bucketsToFormat, BucketOrder.key(true).comparator()); + Function, InternalAggregation> resultBuilder = bucketsToFormat -> { + // The contract of the histogram aggregation is that shards must return + // buckets ordered by centroid in ascending order + CollectionUtil.introSort(bucketsToFormat, BucketOrder.key(true).comparator()); - InternalVariableWidthHistogram.EmptyBucketInfo emptyBucketInfo = new InternalVariableWidthHistogram.EmptyBucketInfo( - buildEmptySubAggregations() - ); + InternalVariableWidthHistogram.EmptyBucketInfo emptyBucketInfo = new InternalVariableWidthHistogram.EmptyBucketInfo( + buildEmptySubAggregations() + ); - return new InternalVariableWidthHistogram(name, bucketsToFormat, emptyBucketInfo, numBuckets, formatter, metadata()); - }; + return new InternalVariableWidthHistogram(name, bucketsToFormat, emptyBucketInfo, numBuckets, formatter, metadata()); + }; - return new InternalAggregation[] { resultBuilder.apply(buckets) }; + return new InternalAggregation[] { resultBuilder.apply(buckets) }; + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java index 5c8f8ab9c562e..b49668e45b889 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java @@ -8,6 +8,7 @@ */ package org.elasticsearch.search.aggregations.bucket.missing; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.index.fielddata.DocValueBits; import org.elasticsearch.search.aggregations.AggregationExecutionContext; import org.elasticsearch.search.aggregations.Aggregator; @@ -67,7 +68,7 @@ public void collect(int doc, long bucket) throws IOException { } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { return buildAggregationsForSingleBucket( owningBucketOrds, (owningBucketOrd, subAggregationResults) -> new InternalMissing( diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java index 0fbb9745aa400..23a2d6380c290 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java @@ -21,6 +21,7 @@ import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.util.BitSet; import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.index.mapper.NestedObjectMapper; import org.elasticsearch.search.aggregations.AggregationExecutionContext; import org.elasticsearch.search.aggregations.Aggregator; @@ -124,7 +125,7 @@ private void processBufferedDocs() throws IOException { } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { return buildAggregationsForSingleBucket( owningBucketOrds, (owningBucketOrd, subAggregationResults) -> new InternalNested( diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java index 0e3e4679c7a2d..2477b67367e14 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java @@ -13,6 +13,7 @@ import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.util.BitSet; import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.index.mapper.NestedObjectMapper; import org.elasticsearch.search.aggregations.AggregationExecutionContext; import org.elasticsearch.search.aggregations.Aggregator; @@ -86,7 +87,7 @@ public void collect(int childDoc, long bucket) throws IOException { } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { return buildAggregationsForSingleBucket( owningBucketOrds, (owningBucketOrd, subAggregationResults) -> new InternalReverseNested( diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java index f7022ff22d373..36a8fccc77e99 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java @@ -33,14 +33,12 @@ public class InternalIpPrefix extends InternalMultiBucketAggregation { - public static class Bucket extends InternalMultiBucketAggregation.InternalBucket + public static class Bucket extends InternalMultiBucketAggregation.InternalBucketWritable implements IpPrefix.Bucket, KeyComparable { - private final transient DocValueFormat format; private final BytesRef key; - private final boolean keyed; private final boolean isIpv6; private final int prefixLength; private final boolean appendPrefixLength; @@ -48,18 +46,14 @@ public static class Bucket extends InternalMultiBucketAggregation.InternalBucket private final InternalAggregations 
aggregations; public Bucket( - DocValueFormat format, BytesRef key, - boolean keyed, boolean isIpv6, int prefixLength, boolean appendPrefixLength, long docCount, InternalAggregations aggregations ) { - this.format = format; this.key = key; - this.keyed = keyed; this.isIpv6 = isIpv6; this.prefixLength = prefixLength; this.appendPrefixLength = appendPrefixLength; @@ -70,9 +64,7 @@ public Bucket( /** * Read from a stream. */ - public Bucket(StreamInput in, DocValueFormat format, boolean keyed) throws IOException { - this.format = format; - this.keyed = keyed; + public Bucket(StreamInput in) throws IOException { this.key = in.readBytesRef(); this.isIpv6 = in.readBoolean(); this.prefixLength = in.readVInt(); @@ -81,8 +73,7 @@ public Bucket(StreamInput in, DocValueFormat format, boolean keyed) throws IOExc this.aggregations = InternalAggregations.readFrom(in); } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + private void bucketToXContent(XContentBuilder builder, Params params, boolean keyed) throws IOException { String key = DocValueFormat.IP.format(this.key); if (appendPrefixLength) { key = key + "/" + prefixLength; @@ -101,7 +92,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(IpPrefixAggregationBuilder.PREFIX_LENGTH_FIELD.getPreferredName(), prefixLength); aggregations.toXContentInternal(builder, params); builder.endObject(); - return builder; } private static BytesRef netmask(int prefixLength) { @@ -118,10 +108,6 @@ public void writeTo(StreamOutput out) throws IOException { aggregations.writeTo(out); } - public DocValueFormat getFormat() { - return format; - } - public BytesRef getKey() { return key; } @@ -162,14 +148,13 @@ public boolean equals(Object o) { && prefixLength == bucket.prefixLength && appendPrefixLength == bucket.appendPrefixLength && docCount == bucket.docCount - && Objects.equals(format, bucket.format) && Objects.equals(key, bucket.key) && Objects.equals(aggregations, bucket.aggregations); } @Override public int hashCode() { - return Objects.hash(format, key, isIpv6, prefixLength, appendPrefixLength, docCount, aggregations); + return Objects.hash(key, isIpv6, prefixLength, appendPrefixLength, docCount, aggregations); } @Override @@ -206,7 +191,7 @@ public InternalIpPrefix(StreamInput in) throws IOException { format = in.readNamedWriteable(DocValueFormat.class); keyed = in.readBoolean(); minDocCount = in.readVLong(); - buckets = in.readCollectionAsList(stream -> new Bucket(stream, format, keyed)); + buckets = in.readCollectionAsList(Bucket::new); } @Override @@ -298,7 +283,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th builder.startArray(CommonFields.BUCKETS.getPreferredName()); } for (InternalIpPrefix.Bucket bucket : buckets) { - bucket.toXContent(builder, params); + bucket.bucketToXContent(builder, params, keyed); } if (keyed) { builder.endObject(); @@ -316,9 +301,7 @@ public InternalIpPrefix create(List buckets) { @Override public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { return new Bucket( - format, prototype.key, - prototype.keyed, prototype.isIpv6, prototype.prefixLength, prototype.appendPrefixLength, @@ -328,16 +311,7 @@ public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) } private Bucket createBucket(Bucket prototype, InternalAggregations aggregations, long docCount) { - return new Bucket( - format, - prototype.key, - prototype.keyed, - prototype.isIpv6, - 
prototype.prefixLength, - prototype.appendPrefixLength, - docCount, - aggregations - ); + return new Bucket(prototype.key, prototype.isIpv6, prototype.prefixLength, prototype.appendPrefixLength, docCount, aggregations); } private Bucket reduceBucket(List buckets, AggregationReduceContext context) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregator.java index 9548cd871e161..38d26bfa9ae28 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregator.java @@ -12,6 +12,8 @@ import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.common.util.IntArray; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; @@ -160,57 +162,60 @@ private static void maskIpAddress(final BytesRef ipAddress, final BytesRef subne } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { long totalOrdsToCollect = 0; - final int[] bucketsInOrd = new int[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - final long bucketCount = bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]); - bucketsInOrd[ordIdx] = (int) bucketCount; - totalOrdsToCollect += bucketCount; - } - - long[] bucketOrdsToCollect = new long[(int) totalOrdsToCollect]; - int b = 0; - for (long owningBucketOrd : owningBucketOrds) { - BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd); - while (ordsEnum.next()) { - bucketOrdsToCollect[b++] = ordsEnum.ord(); + try (IntArray bucketsInOrd = bigArrays().newIntArray(owningBucketOrds.size())) { + for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) { + final long bucketCount = bucketOrds.bucketsInOrd(owningBucketOrds.get(ordIdx)); + bucketsInOrd.set(ordIdx, (int) bucketCount); + totalOrdsToCollect += bucketCount; } - } - var subAggregationResults = buildSubAggsForBuckets(bucketOrdsToCollect); - InternalAggregation[] results = new InternalAggregation[owningBucketOrds.length]; - b = 0; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - List buckets = new ArrayList<>(bucketsInOrd[ordIdx]); - BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); - while (ordsEnum.next()) { - long ordinal = ordsEnum.ord(); - if (bucketOrdsToCollect[b] != ordinal) { - throw AggregationErrors.iterationOrderChangedWithoutMutating(bucketOrds.toString(), ordinal, bucketOrdsToCollect[b]); + try (LongArray bucketOrdsToCollect = bigArrays().newLongArray(totalOrdsToCollect)) { + int[] b = new int[] { 0 }; + for (long i = 0; i < owningBucketOrds.size(); i++) { + BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds.get(i)); + while (ordsEnum.next()) { + bucketOrdsToCollect.set(b[0]++, ordsEnum.ord()); + } } - BytesRef ipAddress = new BytesRef(); - ordsEnum.readValue(ipAddress); - long docCount = bucketDocCount(ordinal); - buckets.add( - new InternalIpPrefix.Bucket( - 
config.format(), - BytesRef.deepCopyOf(ipAddress), - keyed, - ipPrefix.isIpv6, - ipPrefix.prefixLength, - ipPrefix.appendPrefixLength, - docCount, - subAggregationResults.apply(b++) - ) - ); - - // NOTE: the aggregator is expected to return sorted results - CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator()); + + var subAggregationResults = buildSubAggsForBuckets(bucketOrdsToCollect); + b[0] = 0; + return buildAggregations(Math.toIntExact(owningBucketOrds.size()), ordIdx -> { + List buckets = new ArrayList<>(bucketsInOrd.get(ordIdx)); + BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds.get(ordIdx)); + while (ordsEnum.next()) { + long ordinal = ordsEnum.ord(); + if (bucketOrdsToCollect.get(b[0]) != ordinal) { + throw AggregationErrors.iterationOrderChangedWithoutMutating( + bucketOrds.toString(), + ordinal, + bucketOrdsToCollect.get(b[0]) + ); + } + BytesRef ipAddress = new BytesRef(); + ordsEnum.readValue(ipAddress); + long docCount = bucketDocCount(ordinal); + checkRealMemoryCBForInternalBucket(); + buckets.add( + new InternalIpPrefix.Bucket( + BytesRef.deepCopyOf(ipAddress), + ipPrefix.isIpv6, + ipPrefix.prefixLength, + ipPrefix.appendPrefixLength, + docCount, + subAggregationResults.apply(b[0]++) + ) + ); + + // NOTE: the aggregator is expected to return sorted results + CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator()); + } + return new InternalIpPrefix(name, config.format(), keyed, minDocCount, buckets, metadata()); + }); } - results[ordIdx] = new InternalIpPrefix(name, config.format(), keyed, minDocCount, buckets, metadata()); } - return results; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java index 6119af3cb6a57..c10bb3543549e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java @@ -14,6 +14,7 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.search.DocValueFormat; @@ -359,13 +360,13 @@ private interface DocCollector { } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { return buildAggregationsForFixedBucketCount( owningBucketOrds, ranges.length, (offsetInOwningOrd, docCount, subAggregationResults) -> { Range range = ranges[offsetInOwningOrd]; - return new InternalBinaryRange.Bucket(format, keyed, range.key, range.from, range.to, docCount, subAggregationResults); + return new InternalBinaryRange.Bucket(format, range.key, range.from, range.to, docCount, subAggregationResults); }, buckets -> new InternalBinaryRange(name, format, keyed, buckets, metadata()) ); @@ -377,7 +378,7 @@ public InternalAggregation buildEmptyAggregation() { InternalAggregations subAggs = buildEmptySubAggregations(); List buckets = new ArrayList<>(ranges.length); for (Range range : ranges) { - InternalBinaryRange.Bucket bucket = new InternalBinaryRange.Bucket(format, keyed, range.key, 
range.from, range.to, 0, subAggs); + InternalBinaryRange.Bucket bucket = new InternalBinaryRange.Bucket(format, range.key, range.from, range.to, 0, subAggs); buckets.add(bucket); } return new InternalBinaryRange(name, format, keyed, buckets, metadata()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java index 100bab7443a51..34a2ebea88440 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java @@ -36,26 +36,16 @@ public final class InternalBinaryRange extends InternalMultiBucketAggregation Bucket.createFromStream(stream, format, keyed)); + buckets = in.readCollectionAsList(stream -> Bucket.createFromStream(stream, format)); } @Override @@ -235,7 +222,7 @@ public InternalBinaryRange create(List buckets) { @Override public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { - return new Bucket(format, keyed, prototype.key, prototype.from, prototype.to, prototype.docCount, aggregations); + return new Bucket(format, prototype.key, prototype.from, prototype.to, prototype.docCount, aggregations); } @Override @@ -251,7 +238,7 @@ protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceCont @Override protected Bucket createBucket(Bucket proto, long docCount, InternalAggregations aggregations) { - return new Bucket(proto.format, proto.keyed, proto.key, proto.from, proto.to, docCount, aggregations); + return new Bucket(proto.format, proto.key, proto.from, proto.to, docCount, aggregations); } }; @@ -299,7 +286,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th builder.startArray(CommonFields.BUCKETS.getPreferredName()); } for (Bucket range : buckets) { - range.toXContent(builder, params); + range.bucketToXContent(builder, params, keyed); } if (keyed) { builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java index 7b2858806c325..7291a099dd7f7 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java @@ -34,19 +34,11 @@ public Bucket( boolean keyed, DocValueFormat formatter ) { - super(key, from, to, docCount, InternalAggregations.from(aggregations), keyed, formatter); + super(key, from, to, docCount, InternalAggregations.from(aggregations), formatter); } - public Bucket( - String key, - double from, - double to, - long docCount, - InternalAggregations aggregations, - boolean keyed, - DocValueFormat formatter - ) { - super(key, from, to, docCount, aggregations, keyed, formatter); + public Bucket(String key, double from, double to, long docCount, InternalAggregations aggregations, DocValueFormat formatter) { + super(key, from, to, docCount, aggregations, formatter); } @Override @@ -99,10 +91,9 @@ public Bucket createBucket( double to, long docCount, InternalAggregations aggregations, - boolean keyed, DocValueFormat formatter ) { - return new Bucket(key, from, to, docCount, aggregations, keyed, formatter); + return new Bucket(key, from, to, docCount, aggregations, formatter); } @Override @@ -113,7 +104,6 
@@ public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) prototype.internalGetTo(), prototype.getDocCount(), aggregations, - prototype.getKeyed(), prototype.getFormat() ); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalGeoDistance.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalGeoDistance.java index d1c3761d45e82..9a33df4702c1c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalGeoDistance.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalGeoDistance.java @@ -23,8 +23,8 @@ public class InternalGeoDistance extends InternalRange ranges, DocValueFormat format, boolean keye } @SuppressWarnings("unchecked") - public B createBucket( - String key, - double from, - double to, - long docCount, - InternalAggregations aggregations, - boolean keyed, - DocValueFormat format - ) { - return (B) new Bucket(key, from, to, docCount, aggregations, keyed, format); + public B createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, DocValueFormat format) { + return (B) new Bucket(key, from, to, docCount, aggregations, format); } @SuppressWarnings("unchecked") @@ -232,7 +209,6 @@ public B createBucket(InternalAggregations aggregations, B prototype) { prototype.to, prototype.getDocCount(), aggregations, - prototype.keyed, prototype.format ); } @@ -285,7 +261,7 @@ public InternalRange(StreamInput in) throws IOException { } long docCount = in.readVLong(); InternalAggregations aggregations = InternalAggregations.readFrom(in); - ranges.add(getFactory().createBucket(key, from, to, docCount, aggregations, keyed, format)); + ranges.add(getFactory().createBucket(key, from, to, docCount, aggregations, format)); } this.ranges = ranges; } @@ -335,7 +311,7 @@ protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceCont @Override protected Bucket createBucket(Bucket proto, long docCount, InternalAggregations aggregations) { - return getFactory().createBucket(proto.key, proto.from, proto.to, docCount, aggregations, proto.keyed, proto.format); + return getFactory().createBucket(proto.key, proto.from, proto.to, docCount, aggregations, proto.format); } }; @@ -371,7 +347,6 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) { b.to, samplingContext.scaleUp(b.getDocCount()), InternalAggregations.finalizeSampling(b.getAggregations(), samplingContext), - b.keyed, b.format ) ) @@ -390,7 +365,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th builder.startArray(CommonFields.BUCKETS.getPreferredName()); } for (B range : ranges) { - range.toXContent(builder, params); + range.bucketToXContent(builder, params, keyed); } if (keyed) { builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java index 6d63bb786c29f..a4574e8081868 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.LongArray; import 
org.elasticsearch.core.CheckedFunction; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.NumericDoubleValues; @@ -531,21 +532,13 @@ protected long subBucketOrdinal(long owningBucketOrdinal, int rangeOrd) { @Override @SuppressWarnings("unchecked") - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { return buildAggregationsForFixedBucketCount( owningBucketOrds, ranges.length, (offsetInOwningOrd, docCount, subAggregationResults) -> { Range range = ranges[offsetInOwningOrd]; - return rangeFactory.createBucket( - range.key, - range.originalFrom, - range.originalTo, - docCount, - subAggregationResults, - keyed, - format - ); + return rangeFactory.createBucket(range.key, range.originalFrom, range.originalTo, docCount, subAggregationResults, format); }, buckets -> rangeFactory.create(name, buckets, format, keyed, metadata()) ); @@ -563,7 +556,6 @@ public InternalAggregation buildEmptyAggregation() { range.originalTo, 0, subAggs, - keyed, format ); buckets.add(bucket); @@ -613,7 +605,7 @@ public InternalAggregation buildEmptyAggregation() { InternalAggregations subAggs = buildEmptySubAggregations(); List buckets = new ArrayList<>(ranges.length); for (RangeAggregator.Range range : ranges) { - buckets.add(factory.createBucket(range.key, range.originalFrom, range.originalTo, 0, subAggs, keyed, format)); + buckets.add(factory.createBucket(range.key, range.originalFrom, range.originalTo, 0, subAggs, format)); } return factory.create(name, buckets, format, keyed, metadata()); } @@ -885,7 +877,7 @@ protected InternalAggregation adapt(InternalAggregation delegateResult) { Range r = ranges[i]; InternalFilters.InternalBucket b = filters.getBuckets().get(i); buckets.add( - rangeFactory.createBucket(r.getKey(), r.originalFrom, r.originalTo, b.getDocCount(), b.getAggregations(), keyed, format) + rangeFactory.createBucket(r.getKey(), r.originalFrom, r.originalTo, b.getDocCount(), b.getAggregations(), format) ); } return rangeFactory.create(name(), buckets, format, keyed, filters.getMetadata()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java index 37cee75c11b48..70f72fafba7b5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java @@ -19,6 +19,7 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -120,7 +121,7 @@ public void postCollection() throws IOException { } @Override - public void prepareSelectedBuckets(long... 
selectedBuckets) throws IOException { + public void prepareSelectedBuckets(LongArray selectedBuckets) { // no-op - deferred aggs processed in postCollection call } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java index 78b2cdfe7655d..a4c06a194fbf7 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java @@ -11,6 +11,7 @@ import org.apache.lucene.misc.search.DiversifiedTopDocsCollector; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.core.Releasables; import org.elasticsearch.search.aggregations.AggregationExecutionContext; @@ -212,7 +213,7 @@ protected boolean shouldDefer(Aggregator aggregator) { } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { return buildAggregationsForSingleBucket( owningBucketOrds, (owningBucketOrd, subAggregationResults) -> new InternalSampler( diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregator.java index fc03786356f87..699b8c6b5d500 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregator.java @@ -9,18 +9,23 @@ package org.elasticsearch.search.aggregations.bucket.sampler.random; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; -import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.search.aggregations.AggregationExecutionContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.CardinalityUpperBound; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; import org.elasticsearch.search.aggregations.support.AggregationContext; @@ -33,14 +38,13 @@ public class RandomSamplerAggregator extends BucketsAggregator implements Single private final int seed; private final Integer shardSeed; private final double probability; - private final CheckedSupplier weightSupplier; + private Weight weight; RandomSamplerAggregator( String name, int seed, Integer 
shardSeed,
         double probability,
-        CheckedSupplier<Weight, IOException> weightSupplier,
         AggregatorFactories factories,
         AggregationContext context,
         Aggregator parent,
@@ -55,12 +59,35 @@ public class RandomSamplerAggregator extends BucketsAggregator implements Single
                 RandomSamplerAggregationBuilder.NAME + " aggregation [" + name + "] must have sub aggregations configured"
             );
         }
-        this.weightSupplier = weightSupplier;
         this.shardSeed = shardSeed;
     }
 
+    /**
+     * This creates the query weight which will be used in the aggregator.
+     *
+     * This weight is a boolean query between {@link RandomSamplingQuery} and the configured top level query of the search. This allows
+     * the aggregation to iterate the documents directly, thus sampling in the background instead of the foreground.
+     * @return weight to be used, is cached for additional usages
+     * @throws IOException when building the weight or queries fails;
+     */
+    private Weight getWeight() throws IOException {
+        if (weight == null) {
+            ScoreMode scoreMode = scoreMode();
+            BooleanQuery.Builder fullQuery = new BooleanQuery.Builder().add(
+                context.query(),
+                scoreMode.needsScores() ? BooleanClause.Occur.MUST : BooleanClause.Occur.FILTER
+            );
+            if (probability < 1.0) {
+                Query sampleQuery = new RandomSamplingQuery(probability, seed, shardSeed == null ? context.shardRandomSeed() : shardSeed);
+                fullQuery.add(sampleQuery, BooleanClause.Occur.FILTER);
+            }
+            weight = context.searcher().createWeight(context.searcher().rewrite(fullQuery.build()), scoreMode, 1f);
+        }
+        return weight;
+    }
+
     @Override
-    public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+    public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException {
         return buildAggregationsForSingleBucket(
             owningBucketOrds,
             (owningBucketOrd, subAggregationResults) -> new InternalRandomSampler(
@@ -100,22 +127,26 @@ protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCt
         if (sub.isNoop()) {
             return LeafBucketCollector.NO_OP_COLLECTOR;
         }
+
+        Scorer scorer = getWeight().scorer(aggCtx.getLeafReaderContext());
+        // This means there are no docs to iterate, possibly due to the fields not existing
+        if (scorer == null) {
+            return LeafBucketCollector.NO_OP_COLLECTOR;
+        }
+        sub.setScorer(scorer);
+
         // No sampling is being done, collect all docs
+        // TODO know when sampling would be much slower and skip sampling: https://github.com/elastic/elasticsearch/issues/84353
         if (probability >= 1.0) {
             grow(1);
-            return new LeafBucketCollector() {
+            return new LeafBucketCollectorBase(sub, null) {
                 @Override
                 public void collect(int doc, long owningBucketOrd) throws IOException {
                     collectExistingBucket(sub, doc, 0);
                 }
             };
         }
-        // TODO know when sampling would be much slower and skip sampling: https://github.com/elastic/elasticsearch/issues/84353
-        Scorer scorer = weightSupplier.get().scorer(aggCtx.getLeafReaderContext());
-        // This means there are no docs to iterate, possibly due to the fields not existing
-        if (scorer == null) {
-            return LeafBucketCollector.NO_OP_COLLECTOR;
-        }
+
         final DocIdSetIterator docIt = scorer.iterator();
         final Bits liveDocs = aggCtx.getLeafReaderContext().reader().getLiveDocs();
         try {
@@ -135,5 +166,4 @@ public void collect(int doc, long owningBucketOrd) throws IOException {
         // Since we have done our own collection, there is nothing for the leaf collector to do
         return LeafBucketCollector.NO_OP_COLLECTOR;
     }
-
 }
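
[Editor's note] The weight now lives in the aggregator itself and is built lazily: the top-level query joins as MUST when the aggregation needs scores and as FILTER otherwise, and the RandomSamplingQuery filter is only added when probability < 1.0. A condensed, hypothetical sketch of that lazy-caching pattern against plain Lucene types; searcher, topLevelQuery and samplingFilter are stand-ins supplied by the caller, not names from this diff:

import java.io.IOException;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Weight;

class CachedSamplingWeight {
    private Weight weight; // built once, reused by every leaf collector

    Weight get(IndexSearcher searcher, Query topLevelQuery, Query samplingFilter,
               ScoreMode scoreMode, double probability) throws IOException {
        if (weight == null) {
            BooleanQuery.Builder query = new BooleanQuery.Builder()
                // MUST keeps scores flowing to sub-aggs that need them; FILTER skips scoring
                .add(topLevelQuery, scoreMode.needsScores() ? BooleanClause.Occur.MUST : BooleanClause.Occur.FILTER);
            if (probability < 1.0) {
                query.add(samplingFilter, BooleanClause.Occur.FILTER); // only filter when actually sampling
            }
            weight = searcher.createWeight(searcher.rewrite(query.build()), scoreMode, 1f);
        }
        return weight;
    }
}
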
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorFactory.java
index 67c958046dac7..50921501896d3 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorFactory.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorFactory.java
@@ -9,10 +9,6 @@
 package org.elasticsearch.search.aggregations.bucket.sampler.random;
 
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.ScoreMode;
-import org.apache.lucene.search.Weight;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.AggregatorFactory;
@@ -30,7 +26,6 @@ public class RandomSamplerAggregatorFactory extends AggregatorFactory {
     private final Integer shardSeed;
     private final double probability;
     private final SamplingContext samplingContext;
-    private Weight weight;
 
     RandomSamplerAggregatorFactory(
         String name,
@@ -57,41 +52,6 @@ public Optional<SamplingContext> getSamplingContext() {
 
     @Override
     public Aggregator createInternal(Aggregator parent, CardinalityUpperBound cardinality, Map<String, Object> metadata) throws IOException {
-        return new RandomSamplerAggregator(
-            name,
-            seed,
-            shardSeed,
-            probability,
-            this::getWeight,
-            factories,
-            context,
-            parent,
-            cardinality,
-            metadata
-        );
+        return new RandomSamplerAggregator(name, seed, shardSeed, probability, factories, context, parent, cardinality, metadata);
     }
-
-    /**
-     * This creates the query weight which will be used in the aggregator.
-     *
-     * This weight is a boolean query between {@link RandomSamplingQuery} and the configured top level query of the search. This allows
-     * the aggregation to iterate the documents directly, thus sampling in the background instead of the foreground.
-     * @return weight to be used, is cached for additional usages
-     * @throws IOException when building the weight or queries fails;
-     */
-    private Weight getWeight() throws IOException {
-        if (weight == null) {
-            RandomSamplingQuery query = new RandomSamplingQuery(
-                probability,
-                seed,
-                shardSeed == null ? 
context.shardRandomSeed() : shardSeed - ); - BooleanQuery booleanQuery = new BooleanQuery.Builder().add(query, BooleanClause.Occur.FILTER) - .add(context.query(), BooleanClause.Occur.FILTER) - .build(); - weight = context.searcher().createWeight(context.searcher().rewrite(booleanQuery), ScoreMode.COMPLETE_NO_SCORES, 1f); - } - return weight; - } - } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java index 5c422a9dd4e32..4c54df366eb54 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractInternalTerms.java @@ -63,9 +63,9 @@ public abstract static class AbstractTermsBucket buckets, AggregationReduceContext context) { @@ -102,7 +104,7 @@ private B reduceBucket(List buckets, AggregationReduceContext context) { for (B bucket : buckets) { docCount += bucket.getDocCount(); if (docCountError != -1) { - if (bucket.getShowDocCountError() == false || bucket.getDocCountError() == -1) { + if (getShowDocCountError() == false || bucket.getDocCountError() == -1) { docCountError = -1; } else { docCountError += bucket.getDocCountError(); @@ -255,6 +257,7 @@ public void accept(InternalAggregation aggregation) { } otherDocCount[0] += terms.getSumOfOtherDocCounts(); final long thisAggDocCountError = getDocCountError(terms); + setDocCountError(thisAggDocCountError); if (sumDocCountError != -1) { if (thisAggDocCountError == -1) { sumDocCountError = -1; @@ -262,16 +265,17 @@ public void accept(InternalAggregation aggregation) { sumDocCountError += thisAggDocCountError; } } - setDocCountError(thisAggDocCountError); - for (B bucket : terms.getBuckets()) { - // If there is already a doc count error for this bucket - // subtract this aggs doc count error from it to make the - // new value for the bucket. This then means that when the - // final error for the bucket is calculated below we account - // for the existing error calculated in a previous reduce. - // Note that if the error is unbounded (-1) this will be fixed - // later in this method. - bucket.updateDocCountError(-thisAggDocCountError); + if (getShowDocCountError()) { + for (B bucket : terms.getBuckets()) { + // If there is already a doc count error for this bucket + // subtract this aggs doc count error from it to make the + // new value for the bucket. This then means that when the + // final error for the bucket is calculated below we account + // for the existing error calculated in a previous reduce. + // Note that if the error is unbounded (-1) this will be fixed + // later in this method. + bucket.updateDocCountError(-thisAggDocCountError); + } } if (terms.getBuckets().isEmpty() == false) { bucketsList.add(terms.getBuckets()); @@ -317,17 +321,17 @@ public InternalAggregation get() { result.add(bucket.reduced(AbstractInternalTerms.this::reduceBucket, reduceContext)); }); } - for (B r : result) { - if (sumDocCountError == -1) { - r.setDocCountError(-1); - } else { - r.updateDocCountError(sumDocCountError); + if (getShowDocCountError()) { + for (B r : result) { + if (sumDocCountError == -1) { + r.setDocCountError(-1); + } else { + r.updateDocCountError(sumDocCountError); + } } } - long docCountError; - if (sumDocCountError == -1) { - docCountError = -1; - } else { + long docCountError = -1; + if (sumDocCountError != -1) { docCountError = size == 1 ? 
0 : sumDocCountError; } return create(name, result, reduceContext.isFinalReduce() ? getOrder() : thisReduceOrder, docCountError, otherDocCount[0]); @@ -347,7 +351,7 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) { b -> createBucket( samplingContext.scaleUp(b.getDocCount()), InternalAggregations.finalizeSampling(b.getAggregations(), samplingContext), - b.getShowDocCountError() ? samplingContext.scaleUp(b.getDocCountError()) : 0, + getShowDocCountError() ? samplingContext.scaleUp(b.getDocCountError()) : 0, b ) ) @@ -361,6 +365,7 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) { protected static XContentBuilder doXContentCommon( XContentBuilder builder, Params params, + boolean showDocCountError, Long docCountError, long otherDocCount, List> buckets @@ -369,7 +374,7 @@ protected static XContentBuilder doXContentCommon( builder.field(SUM_OF_OTHER_DOC_COUNTS.getPreferredName(), otherDocCount); builder.startArray(CommonFields.BUCKETS.getPreferredName()); for (AbstractTermsBucket bucket : buckets) { - bucket.toXContent(builder, params); + bucket.bucketToXContent(builder, params, showDocCountError); } builder.endArray(); return builder; diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/Fixture.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketAndOrd.java similarity index 57% rename from build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/Fixture.java rename to server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketAndOrd.java index f7ee88c715dfa..7b853860b7959 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/Fixture.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketAndOrd.java @@ -7,15 +7,15 @@ * License v3.0 only", or the "Server Side Public License, v 1". */ -package org.elasticsearch.gradle.internal.test; +package org.elasticsearch.search.aggregations.bucket.terms; -/** - * Any object that can produce an accompanying stop task, meant to tear down - * a previously instantiated service. - */ -public interface Fixture { +/** Represents a bucket and its bucket ordinal */ +public final class BucketAndOrd { - /** A task which will stop this fixture. This should be used as a finalizedBy for any tasks that use the fixture. 
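The BucketAndOrd holder introduced by this rename pairs each bucket with its bucket ordinal; the ordinal stays mutable because ordinals get rewritten when bucket sets are merged or compacted. A minimal sketch of the shape, assuming the generic bucket type B that the constructor takes:

/** Represents a bucket and its bucket ordinal. */
public final class BucketAndOrd<B> {

    public final B bucket; // the bucket itself never changes
    public long ord;       // mutable: rewritten when bucket ordinals are merged or compacted

    public BucketAndOrd(B bucket) {
        this.bucket = bucket;
    }
}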
*/ - Object getStopTask(); + public final B bucket; // the bucket + public long ord; // mutable ordinal of the bucket + public BucketAndOrd(B bucket) { + this.bucket = bucket; + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java index 9789a9edc58f7..5c28c25de6e87 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java @@ -164,8 +164,8 @@ public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) prototype.term, prototype.docCount, aggregations, - prototype.showDocCountError, - prototype.docCountError, + showTermDocCountError, + prototype.getDocCountError(), prototype.format ); } @@ -216,6 +216,6 @@ public void close() { @Override protected Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, DoubleTerms.Bucket prototype) { - return new Bucket(prototype.term, docCount, aggs, prototype.showDocCountError, docCountError, format); + return new Bucket(prototype.term, docCount, aggs, showTermDocCountError, docCountError, format); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 0f7c61dc9f25b..5a79155d1d4f5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.LongHash; +import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.common.util.ObjectArrayPriorityQueue; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; @@ -191,7 +192,7 @@ public void collect(int doc, long owningBucketOrd) throws IOException { } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { return resultStrategy.buildAggregations(owningBucketOrds); } @@ -696,61 +697,66 @@ abstract class ResultStrategy< B extends InternalMultiBucketAggregation.InternalBucket, TB extends InternalMultiBucketAggregation.InternalBucket> implements Releasable { - private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { + if (valueCount == 0) { // no context in this reader - InternalAggregation[] results = new InternalAggregation[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - results[ordIdx] = buildNoValuesResult(owningBucketOrds[ordIdx]); - } - return results; + return GlobalOrdinalsStringTermsAggregator.this.buildAggregations( + Math.toIntExact(owningBucketOrds.size()), + ordIdx -> buildNoValuesResult(owningBucketOrds.get(ordIdx)) + ); } - - B[][] topBucketsPreOrd = buildTopBucketsPerOrd(owningBucketOrds.length); - long[] otherDocCount = new long[owningBucketOrds.length]; - GlobalOrdLookupFunction 
lookupGlobalOrd = valuesSupplier.get()::lookupOrd; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - final int size; - if (bucketCountThresholds.getMinDocCount() == 0) { - // if minDocCount == 0 then we can end up with more buckets then maxBucketOrd() returns - size = (int) Math.min(valueCount, bucketCountThresholds.getShardSize()); - } else { - size = (int) Math.min(maxBucketOrd(), bucketCountThresholds.getShardSize()); - } - try (ObjectArrayPriorityQueue ordered = buildPriorityQueue(size)) { - final int finalOrdIdx = ordIdx; - BucketUpdater updater = bucketUpdater(owningBucketOrds[ordIdx], lookupGlobalOrd); - collectionStrategy.forEach(owningBucketOrds[ordIdx], new BucketInfoConsumer() { - TB spare = null; - - @Override - public void accept(long globalOrd, long bucketOrd, long docCount) throws IOException { - otherDocCount[finalOrdIdx] += docCount; - if (docCount >= bucketCountThresholds.getShardMinDocCount()) { - if (spare == null) { - spare = buildEmptyTemporaryBucket(); + try ( + LongArray otherDocCount = bigArrays().newLongArray(owningBucketOrds.size(), true); + ObjectArray topBucketsPreOrd = buildTopBucketsPerOrd(owningBucketOrds.size()) + ) { + GlobalOrdLookupFunction lookupGlobalOrd = valuesSupplier.get()::lookupOrd; + for (long ordIdx = 0; ordIdx < topBucketsPreOrd.size(); ordIdx++) { + final int size; + if (bucketCountThresholds.getMinDocCount() == 0) { + // if minDocCount == 0 then we can end up with more buckets than maxBucketOrd() returns + size = (int) Math.min(valueCount, bucketCountThresholds.getShardSize()); + } else { + size = (int) Math.min(maxBucketOrd(), bucketCountThresholds.getShardSize()); + } + try (ObjectArrayPriorityQueue ordered = buildPriorityQueue(size)) { + final long finalOrdIdx = ordIdx; + final long owningBucketOrd = owningBucketOrds.get(ordIdx); + BucketUpdater updater = bucketUpdater(owningBucketOrd, lookupGlobalOrd); + collectionStrategy.forEach(owningBucketOrd, new BucketInfoConsumer() { + TB spare = null; + + @Override + public void accept(long globalOrd, long bucketOrd, long docCount) throws IOException { + otherDocCount.increment(finalOrdIdx, docCount); + if (docCount >= bucketCountThresholds.getShardMinDocCount()) { + if (spare == null) { + checkRealMemoryCBForInternalBucket(); + spare = buildEmptyTemporaryBucket(); + } + updater.updateBucket(spare, globalOrd, bucketOrd, docCount); + spare = ordered.insertWithOverflow(spare); } - updater.updateBucket(spare, globalOrd, bucketOrd, docCount); - spare = ordered.insertWithOverflow(spare); } + }); + + // Get the top buckets + topBucketsPreOrd.set(ordIdx, buildBuckets((int) ordered.size())); + for (int i = (int) ordered.size() - 1; i >= 0; --i) { + checkRealMemoryCBForInternalBucket(); + B bucket = convertTempBucketToRealBucket(ordered.pop(), lookupGlobalOrd); + topBucketsPreOrd.get(ordIdx)[i] = bucket; + otherDocCount.increment(ordIdx, -bucket.getDocCount()); } - }); - - // Get the top buckets - topBucketsPreOrd[ordIdx] = buildBuckets((int) ordered.size()); - for (int i = (int) ordered.size() - 1; i >= 0; --i) { - topBucketsPreOrd[ordIdx][i] = convertTempBucketToRealBucket(ordered.pop(), lookupGlobalOrd); - otherDocCount[ordIdx] -= topBucketsPreOrd[ordIdx][i].getDocCount(); } } - } - buildSubAggs(topBucketsPreOrd); + buildSubAggs(topBucketsPreOrd); - InternalAggregation[] results = new InternalAggregation[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - results[ordIdx] = buildResult(owningBucketOrds[ordIdx], otherDocCount[ordIdx], 
topBucketsPreOrd[ordIdx]); + return GlobalOrdinalsStringTermsAggregator.this.buildAggregations( + Math.toIntExact(owningBucketOrds.size()), + ordIdx -> buildResult(owningBucketOrds.get(ordIdx), otherDocCount.get(ordIdx), topBucketsPreOrd.get(ordIdx)) + ); } - return results; } /** @@ -785,7 +791,7 @@ public void accept(long globalOrd, long bucketOrd, long docCount) throws IOExcep /** * Build an array to hold the "top" buckets for each ordinal. */ - abstract B[][] buildTopBucketsPerOrd(int size); + abstract ObjectArray buildTopBucketsPerOrd(long size); /** * Build an array of buckets for a particular ordinal to collect the @@ -802,7 +808,7 @@ public void accept(long globalOrd, long bucketOrd, long docCount) throws IOExcep * Build the sub-aggregations into the buckets. This will usually * delegate to {@link #buildSubAggsForAllBuckets}. */ - abstract void buildSubAggs(B[][] topBucketsPreOrd) throws IOException; + abstract void buildSubAggs(ObjectArray topBucketsPreOrd) throws IOException; /** * Turn the buckets into an aggregation result. @@ -841,8 +847,8 @@ LeafBucketCollector wrapCollector(LeafBucketCollector primary) { } @Override - StringTerms.Bucket[][] buildTopBucketsPerOrd(int size) { - return new StringTerms.Bucket[size][]; + ObjectArray buildTopBucketsPerOrd(long size) { + return bigArrays().newObjectArray(size); } @Override @@ -874,12 +880,11 @@ StringTerms.Bucket convertTempBucketToRealBucket(OrdBucket temp, GlobalOrdLookup BytesRef term = BytesRef.deepCopyOf(lookupGlobalOrd.apply(temp.globalOrd)); StringTerms.Bucket result = new StringTerms.Bucket(term, temp.docCount, null, showTermDocCountError, 0, format); result.bucketOrd = temp.bucketOrd; - result.docCountError = 0; return result; } @Override - void buildSubAggs(StringTerms.Bucket[][] topBucketsPreOrd) throws IOException { + void buildSubAggs(ObjectArray topBucketsPreOrd) throws IOException { buildSubAggsForAllBuckets(topBucketsPreOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs); } @@ -973,8 +978,8 @@ public void collect(int doc, long owningBucketOrd) throws IOException { } @Override - SignificantStringTerms.Bucket[][] buildTopBucketsPerOrd(int size) { - return new SignificantStringTerms.Bucket[size][]; + ObjectArray buildTopBucketsPerOrd(long size) { + return bigArrays().newObjectArray(size); } @Override @@ -1026,7 +1031,7 @@ SignificantStringTerms.Bucket convertTempBucketToRealBucket( } @Override - void buildSubAggs(SignificantStringTerms.Bucket[][] topBucketsPreOrd) throws IOException { + void buildSubAggs(ObjectArray topBucketsPreOrd) throws IOException { buildSubAggsForAllBuckets(topBucketsPreOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedSignificantTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedSignificantTerms.java index f179b7d05f9a4..3f75a27306ab4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedSignificantTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedSignificantTerms.java @@ -134,7 +134,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th // There is a condition (presumably when only one shard has a bucket?) 
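A pattern repeated throughout this patch: plain long[] and B[][] buffers keyed by owning-bucket ordinal become LongArray and ObjectArray from BigArrays, which are paged, long-addressable, and accounted against the memory circuit breaker, and each speculative bucket allocation is now preceded by checkRealMemoryCBForInternalBucket(). A toy illustration of the paging idea in plain Java (breaker accounting omitted; this is not the real BigArrays implementation):

final class PagedLongArraySketch {
    private static final int PAGE_SHIFT = 12;                 // 4096 longs per page
    private static final int PAGE_MASK = (1 << PAGE_SHIFT) - 1;
    private final long[][] pages;

    PagedLongArraySketch(long size) {
        int pageCount = (int) ((size + PAGE_MASK) >>> PAGE_SHIFT);
        pages = new long[pageCount][];
        for (int p = 0; p < pageCount; p++) {
            pages[p] = new long[1 << PAGE_SHIFT];             // many small pages, no single huge array
        }
    }

    long get(long index) {
        return pages[(int) (index >>> PAGE_SHIFT)][(int) (index & PAGE_MASK)];
    }

    void increment(long index, long delta) {
        pages[(int) (index >>> PAGE_SHIFT)][(int) (index & PAGE_MASK)] += delta;
    }
}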
where reduce is not called // and I end up with buckets that contravene the user's min_doc_count criteria in my reducer if (bucket.subsetDf >= minDocCount) { - bucket.toXContent(builder, params); + bucket.bucketToXContent(builder, params); } } builder.endArray(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java index 563321f56cb5f..d7087a121b4f4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java @@ -87,7 +87,10 @@ protected final void writeTermTypeInfoTo(StreamOutput out) throws IOException { writeSize(shardSize, out); out.writeBoolean(showTermDocCountError); out.writeVLong(otherDocCount); - out.writeCollection(buckets); + out.writeVInt(buckets.size()); + for (var bucket : buckets) { + bucket.writeTo(out, showTermDocCountError); + } } @Override @@ -95,6 +98,11 @@ protected void setDocCountError(long docCountError) { this.docCountError = docCountError; } + @Override + protected boolean getShowDocCountError() { + return showTermDocCountError; + } + @Override protected int getShardSize() { return shardSize; @@ -145,6 +153,6 @@ public int hashCode() { @Override public final XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - return doXContentCommon(builder, params, docCountError, otherDocCount, buckets); + return doXContentCommon(builder, params, showTermDocCountError, docCountError, otherDocCount, buckets); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java index 6540cd2ee38da..7859319f4dd0d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.SetBackedScalingCuckooFilter; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.BucketOrder; @@ -29,10 +30,11 @@ public abstract class InternalRareTerms, B ext implements RareTerms { - public abstract static class Bucket> extends InternalMultiBucketAggregation.InternalBucket + public abstract static class Bucket> extends InternalMultiBucketAggregation.InternalBucketWritable implements RareTerms.Bucket, - KeyComparable { + KeyComparable, + Writeable { /** * Reads a bucket. Should be a constructor reference. 
*/ @@ -81,14 +83,12 @@ public InternalAggregations getAggregations() { return aggregations; } - @Override - public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + private void bucketToXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); keyToXContent(builder); builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount()); aggregations.toXContentInternal(builder, params); builder.endObject(); - return builder; } protected abstract XContentBuilder keyToXContent(XContentBuilder builder) throws IOException; @@ -160,7 +160,7 @@ protected static XContentBuilder doXContentCommon(XContentBuilder builder, Param throws IOException { builder.startArray(CommonFields.BUCKETS.getPreferredName()); for (Bucket bucket : buckets) { - bucket.toXContent(builder, params); + bucket.bucketToXContent(builder, params); } builder.endArray(); return builder; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java index a60911b466847..6c0eb465d1f80 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java @@ -45,7 +45,7 @@ public abstract class InternalSignificantTerms> extends InternalMultiBucketAggregation.InternalBucket + public abstract static class Bucket> extends InternalMultiBucketAggregation.InternalBucketWritable implements SignificantTerms.Bucket { /** @@ -62,7 +62,7 @@ public interface Reader> { long supersetSize; /** * Ordinal of the bucket while it is being built. Not used after it is - * returned from {@link Aggregator#buildAggregations(long[])} and not + * returned from {@link Aggregator#buildAggregations(org.elasticsearch.common.util.LongArray)} and not * serialized. 
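Buckets in these classes stop rendering themselves through toXContent; the owning aggregation calls bucketToXContent and, for terms, passes its own showDocCountError flag down, so the flag no longer has to live on every bucket just for serialization. A rough sketch of that inversion, with a StringBuilder standing in for XContentBuilder and all names hypothetical:

final class BucketRenderSketch {
    record Bucket(String key, long docCount, long docCountError) {
        void bucketToXContent(StringBuilder out, boolean showDocCountError) {
            out.append("{\"key\":\"").append(key).append("\",\"doc_count\":").append(docCount);
            if (showDocCountError) {
                out.append(",\"doc_count_error_upper_bound\":").append(docCountError);
            }
            out.append('}');
        }
    }

    // The enclosing aggregation owns the flag and drives serialization of each bucket.
    static String render(java.util.List<Bucket> buckets, boolean showDocCountError) {
        StringBuilder out = new StringBuilder("[");
        for (int i = 0; i < buckets.size(); i++) {
            if (i > 0) out.append(',');
            buckets.get(i).bucketToXContent(out, showDocCountError);
        }
        return out.append(']').toString();
    }
}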
*/ transient long bucketOrd; @@ -157,8 +157,7 @@ public int hashCode() { return Objects.hash(getClass(), aggregations, score, format); } - @Override - public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + final void bucketToXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); keyToXContent(builder); builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount()); @@ -166,7 +165,6 @@ public final XContentBuilder toXContent(XContentBuilder builder, Params params) builder.field(BG_COUNT, supersetDf); aggregations.toXContentInternal(builder, params); builder.endObject(); - return builder; } protected abstract XContentBuilder keyToXContent(XContentBuilder builder) throws IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java index 8e25c164d5f33..739f0b923eaab 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java @@ -41,9 +41,8 @@ public interface Reader> { long bucketOrd; protected long docCount; - protected long docCountError; + private long docCountError; protected InternalAggregations aggregations; - protected final boolean showDocCountError; protected final DocValueFormat format; protected Bucket( @@ -53,29 +52,23 @@ protected Bucket( long docCountError, DocValueFormat formatter ) { - this.showDocCountError = showDocCountError; this.format = formatter; this.docCount = docCount; this.aggregations = aggregations; - this.docCountError = docCountError; + this.docCountError = showDocCountError ? docCountError : -1; } /** * Read from a stream. */ protected Bucket(StreamInput in, DocValueFormat formatter, boolean showDocCountError) throws IOException { - this.showDocCountError = showDocCountError; this.format = formatter; docCount = in.readVLong(); - docCountError = -1; - if (showDocCountError) { - docCountError = in.readLong(); - } + docCountError = showDocCountError ? 
in.readLong() : -1; aggregations = InternalAggregations.readFrom(in); } - @Override - public final void writeTo(StreamOutput out) throws IOException { + final void writeTo(StreamOutput out, boolean showDocCountError) throws IOException { out.writeVLong(getDocCount()); if (showDocCountError) { out.writeLong(docCountError); @@ -105,9 +98,6 @@ public void setBucketOrd(long bucketOrd) { @Override public long getDocCountError() { - if (showDocCountError == false) { - throw new IllegalStateException("show_terms_doc_count_error is false"); - } return docCountError; } @@ -121,11 +111,6 @@ protected void updateDocCountError(long docCountErrorDiff) { this.docCountError += docCountErrorDiff; } - @Override - protected boolean getShowDocCountError() { - return showDocCountError; - } - @Override public InternalAggregations getAggregations() { return aggregations; @@ -136,7 +121,7 @@ public void setAggregations(InternalAggregations aggregations) { } @Override - public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + public final void bucketToXContent(XContentBuilder builder, Params params, boolean showDocCountError) throws IOException { builder.startObject(); keyToXContent(builder); builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount()); @@ -145,7 +130,6 @@ public final XContentBuilder toXContent(XContentBuilder builder, Params params) } aggregations.toXContentInternal(builder, params); builder.endObject(); - return builder; } protected abstract XContentBuilder keyToXContent(XContentBuilder builder) throws IOException; @@ -156,23 +140,15 @@ public boolean equals(Object obj) { return false; } Bucket that = (Bucket) obj; - if (showDocCountError && docCountError != that.docCountError) { - /* - * docCountError doesn't matter if not showing it and - * serialization sets it to -1 no matter what it was - * before. - */ - return false; - } - return Objects.equals(docCount, that.docCount) - && Objects.equals(showDocCountError, that.showDocCountError) + return Objects.equals(docCountError, that.docCountError) + && Objects.equals(docCount, that.docCount) && Objects.equals(format, that.format) && Objects.equals(aggregations, that.aggregations); } @Override public int hashCode() { - return Objects.hash(getClass(), docCount, format, showDocCountError, showDocCountError ? 
docCountError : -1, aggregations); + return Objects.hash(getClass(), docCount, format, docCountError, aggregations); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTermsAggregator.java index 651705bd71ef8..45ea1245ec38d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTermsAggregator.java @@ -12,7 +12,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedNumericDocValues; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.LongHash; +import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.common.util.SetBackedScalingCuckooFilter; import org.elasticsearch.core.Releasables; import org.elasticsearch.search.DocValueFormat; @@ -118,70 +120,67 @@ private void collectValue(long val, int docId, long owningBucketOrd, LeafBucketC } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { /* * Collect the list of buckets, populate the filter with terms * that are too frequent, and figure out how to merge sub-buckets. */ - LongRareTerms.Bucket[][] rarestPerOrd = new LongRareTerms.Bucket[owningBucketOrds.length][]; - SetBackedScalingCuckooFilter[] filters = new SetBackedScalingCuckooFilter[owningBucketOrds.length]; - long keepCount = 0; - long[] mergeMap = new long[(int) bucketOrds.size()]; - Arrays.fill(mergeMap, -1); - long offset = 0; - for (int owningOrdIdx = 0; owningOrdIdx < owningBucketOrds.length; owningOrdIdx++) { - try (LongHash bucketsInThisOwningBucketToCollect = new LongHash(1, bigArrays())) { - filters[owningOrdIdx] = newFilter(); - List builtBuckets = new ArrayList<>(); - LongKeyedBucketOrds.BucketOrdsEnum collectedBuckets = bucketOrds.ordsEnum(owningBucketOrds[owningOrdIdx]); - while (collectedBuckets.next()) { - long docCount = bucketDocCount(collectedBuckets.ord()); - // if the key is below threshold, reinsert into the new ords - if (docCount <= maxDocCount) { - LongRareTerms.Bucket bucket = new LongRareTerms.Bucket(collectedBuckets.value(), docCount, null, format); - bucket.bucketOrd = offset + bucketsInThisOwningBucketToCollect.add(collectedBuckets.value()); - mergeMap[(int) collectedBuckets.ord()] = bucket.bucketOrd; - builtBuckets.add(bucket); - keepCount++; - } else { - filters[owningOrdIdx].add(collectedBuckets.value()); + try ( + ObjectArray rarestPerOrd = bigArrays().newObjectArray(owningBucketOrds.size()); + ObjectArray filters = bigArrays().newObjectArray(owningBucketOrds.size()) + ) { + try (LongArray mergeMap = bigArrays().newLongArray(bucketOrds.size())) { + mergeMap.fill(0, mergeMap.size(), -1); + long keepCount = 0; + long offset = 0; + for (long owningOrdIdx = 0; owningOrdIdx < owningBucketOrds.size(); owningOrdIdx++) { + try (LongHash bucketsInThisOwningBucketToCollect = new LongHash(1, bigArrays())) { + filters.set(owningOrdIdx, newFilter()); + List builtBuckets = new ArrayList<>(); + LongKeyedBucketOrds.BucketOrdsEnum collectedBuckets = bucketOrds.ordsEnum(owningBucketOrds.get(owningOrdIdx)); + while (collectedBuckets.next()) { + long docCount = bucketDocCount(collectedBuckets.ord()); + 
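The InternalTerms.Bucket changes just above normalize the error at construction time: when errors are not tracked the bucket simply stores -1, so getDocCountError() can return unconditionally instead of throwing, and equals/hashCode compare the normalized value directly. A compact plain-Java sketch of that contract:

final class TermsBucketSketch {
    private static final long UNKNOWN = -1; // sentinel: error not tracked, or unbounded
    private long docCountError;

    TermsBucketSketch(boolean showDocCountError, long docCountError) {
        // Normalize once at construction so every later code path sees one representation.
        this.docCountError = showDocCountError ? docCountError : UNKNOWN;
    }

    long getDocCountError() {
        return docCountError; // no IllegalStateException: UNKNOWN simply means "not tracked"
    }

    void updateDocCountError(long diff) {
        docCountError += diff; // callers guard on the aggregation-level show flag, as in the diff above
    }
}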
// if the key is below threshold, reinsert into the new ords + if (docCount <= maxDocCount) { + checkRealMemoryCBForInternalBucket(); + LongRareTerms.Bucket bucket = new LongRareTerms.Bucket(collectedBuckets.value(), docCount, null, format); + bucket.bucketOrd = offset + bucketsInThisOwningBucketToCollect.add(collectedBuckets.value()); + mergeMap.set(collectedBuckets.ord(), bucket.bucketOrd); + builtBuckets.add(bucket); + keepCount++; + } else { + filters.get(owningOrdIdx).add(collectedBuckets.value()); + } + } + rarestPerOrd.set(owningOrdIdx, builtBuckets.toArray(LongRareTerms.Bucket[]::new)); + offset += bucketsInThisOwningBucketToCollect.size(); } } - rarestPerOrd[owningOrdIdx] = builtBuckets.toArray(LongRareTerms.Bucket[]::new); - offset += bucketsInThisOwningBucketToCollect.size(); - } - } - /* - * Only merge/delete the ordinals if we have actually deleted one, - * to save on some redundant work. - */ - if (keepCount != mergeMap.length) { - LongUnaryOperator howToMerge = b -> mergeMap[(int) b]; - rewriteBuckets(offset, howToMerge); - if (deferringCollector() != null) { - ((BestBucketsDeferringCollector) deferringCollector()).rewriteBuckets(howToMerge); + /* + * Only merge/delete the ordinals if we have actually deleted one, + * to save on some redundant work. + */ + if (keepCount != mergeMap.size()) { + LongUnaryOperator howToMerge = mergeMap::get; + rewriteBuckets(offset, howToMerge); + if (deferringCollector() != null) { + ((BestBucketsDeferringCollector) deferringCollector()).rewriteBuckets(howToMerge); + } + } } - } - /* - * Now build the results! - */ - buildSubAggsForAllBuckets(rarestPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs); - InternalAggregation[] result = new InternalAggregation[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - Arrays.sort(rarestPerOrd[ordIdx], ORDER.comparator()); - result[ordIdx] = new LongRareTerms( - name, - ORDER, - metadata(), - format, - Arrays.asList(rarestPerOrd[ordIdx]), - maxDocCount, - filters[ordIdx] - ); + /* + * Now build the results! 
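Both rare-terms aggregators share this compaction scheme: buckets at or below the max_doc_count threshold are re-keyed into a fresh ordinal space, mergeMap records old ordinal to new ordinal with -1 marking dropped entries, and the rewrite runs only if something was actually dropped. A standalone sketch of the bookkeeping, with plain arrays in place of BigArrays:

import java.util.Arrays;
import java.util.function.LongUnaryOperator;

final class MergeMapSketch {
    public static void main(String[] args) {
        long[] docCounts = { 1, 7, 2, 9, 1 }; // doc count per old bucket ordinal
        long maxDocCount = 2;                 // rare_terms threshold
        long[] mergeMap = new long[docCounts.length];
        Arrays.fill(mergeMap, -1);            // -1: bucket dropped as too frequent
        long next = 0;
        for (int oldOrd = 0; oldOrd < docCounts.length; oldOrd++) {
            if (docCounts[oldOrd] <= maxDocCount) {
                mergeMap[oldOrd] = next++;    // kept: gets a compact new ordinal
            }
        }
        if (next != mergeMap.length) {        // rewrite only when something was dropped
            LongUnaryOperator howToMerge = ord -> mergeMap[(int) ord];
            System.out.println(Arrays.toString(mergeMap)); // [0, -1, 1, -1, 2]
            System.out.println(howToMerge.applyAsLong(4)); // old ordinal 4 -> new ordinal 2
        }
    }
}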
+ */ + buildSubAggsForAllBuckets(rarestPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs); + + return LongRareTermsAggregator.this.buildAggregations(Math.toIntExact(owningBucketOrds.size()), ordIdx -> { + LongRareTerms.Bucket[] buckets = rarestPerOrd.get(ordIdx); + Arrays.sort(buckets, ORDER.comparator()); + return new LongRareTerms(name, ORDER, metadata(), format, Arrays.asList(buckets), maxDocCount, filters.get(ordIdx)); + }); } - return result; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java index f536b7f958ca2..6c2444379c8eb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java @@ -178,8 +178,8 @@ public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) prototype.term, prototype.docCount, aggregations, - prototype.showDocCountError, - prototype.docCountError, + showTermDocCountError, + prototype.getDocCountError(), prototype.format ); } @@ -260,7 +260,7 @@ public InternalAggregation get() { @Override protected Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, LongTerms.Bucket prototype) { - return new Bucket(prototype.term, docCount, aggs, prototype.showDocCountError, docCountError, format); + return new Bucket(prototype.term, docCount, aggs, showTermDocCountError, docCountError, format); } /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java index 76202b6386a73..6ae47d5975479 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java @@ -18,6 +18,7 @@ import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.common.util.ObjectArrayPriorityQueue; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -117,7 +118,7 @@ public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { return resultStrategy.buildAggregations(owningBucketOrds); } @@ -282,45 +283,50 @@ abstract class ResultStrategy ordered = buildPriorityQueue(size)) { - B spare = null; - BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); - Supplier emptyBucketBuilder = emptyBucketBuilder(owningBucketOrds[ordIdx]); - while (ordsEnum.next()) { - long docCount = bucketDocCount(ordsEnum.ord()); - otherDocCounts[ordIdx] += docCount; - if (docCount < bucketCountThresholds.getShardMinDocCount()) { - continue; - } - if (spare == null) { - spare = emptyBucketBuilder.get(); + private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { + try ( + LongArray otherDocCounts = bigArrays().newLongArray(owningBucketOrds.size(), true); + ObjectArray topBucketsPerOrd = 
buildTopBucketsPerOrd(Math.toIntExact(owningBucketOrds.size())) + ) { + for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) { + long owningOrd = owningBucketOrds.get(ordIdx); + collectZeroDocEntriesIfNeeded(owningOrd, excludeDeletedDocs); + int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize()); + + try (ObjectArrayPriorityQueue ordered = buildPriorityQueue(size)) { + B spare = null; + BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningOrd); + Supplier emptyBucketBuilder = emptyBucketBuilder(owningOrd); + while (ordsEnum.next()) { + long docCount = bucketDocCount(ordsEnum.ord()); + otherDocCounts.increment(ordIdx, docCount); + if (docCount < bucketCountThresholds.getShardMinDocCount()) { + continue; + } + if (spare == null) { + checkRealMemoryCBForInternalBucket(); + spare = emptyBucketBuilder.get(); + } + updateBucket(spare, ordsEnum, docCount); + spare = ordered.insertWithOverflow(spare); } - updateBucket(spare, ordsEnum, docCount); - spare = ordered.insertWithOverflow(spare); - } - topBucketsPerOrd[ordIdx] = buildBuckets((int) ordered.size()); - for (int i = (int) ordered.size() - 1; i >= 0; --i) { - topBucketsPerOrd[ordIdx][i] = ordered.pop(); - otherDocCounts[ordIdx] -= topBucketsPerOrd[ordIdx][i].getDocCount(); - finalizeBucket(topBucketsPerOrd[ordIdx][i]); + topBucketsPerOrd.set(ordIdx, buildBuckets((int) ordered.size())); + for (int i = (int) ordered.size() - 1; i >= 0; --i) { + topBucketsPerOrd.get(ordIdx)[i] = ordered.pop(); + otherDocCounts.increment(ordIdx, -topBucketsPerOrd.get(ordIdx)[i].getDocCount()); + finalizeBucket(topBucketsPerOrd.get(ordIdx)[i]); + } } } - } - buildSubAggs(topBucketsPerOrd); - InternalAggregation[] result = new InternalAggregation[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - result[ordIdx] = buildResult(owningBucketOrds[ordIdx], otherDocCounts[ordIdx], topBucketsPerOrd[ordIdx]); + buildSubAggs(topBucketsPerOrd); + + return MapStringTermsAggregator.this.buildAggregations( + Math.toIntExact(owningBucketOrds.size()), + ordIdx -> buildResult(owningBucketOrds.get(ordIdx), otherDocCounts.get(ordIdx), topBucketsPerOrd.get(ordIdx)) + ); } - return result; } /** @@ -361,7 +367,7 @@ private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws /** * Build an array to hold the "top" buckets for each ordinal. */ - abstract B[][] buildTopBucketsPerOrd(int size); + abstract ObjectArray buildTopBucketsPerOrd(long size); /** * Build an array of buckets for a particular ordinal to collect the @@ -379,7 +385,7 @@ private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws * Build the sub-aggregations into the buckets. This will usually * delegate to {@link #buildSubAggsForAllBuckets}. */ - abstract void buildSubAggs(B[][] topBucketsPerOrd) throws IOException; + abstract void buildSubAggs(ObjectArray topBucketsPerOrd) throws IOException; /** * Turn the buckets into an aggregation result. 
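The selection loop above is the common top-N idiom in these result strategies: a size-bounded priority queue, a reusable spare bucket allocated only once a candidate survives shardMinDocCount (now behind a real-memory breaker check), and insertWithOverflow handing back the evicted element for reuse. A plain-Java sketch of bounded insert-with-overflow, using java.util.PriorityQueue rather than the ObjectArrayPriorityQueue used here:

import java.util.Comparator;
import java.util.PriorityQueue;

final class TopNSketch {
    /** Insert into a heap capped at limit; returns the evicted element (the next spare) or null. */
    static long[] insertWithOverflow(PriorityQueue<long[]> heap, int limit, long[] candidate) {
        if (heap.size() < limit) {
            heap.add(candidate);
            return null;                 // candidate kept, nothing to reuse
        }
        long[] worst = heap.peek();
        if (candidate[0] > worst[0]) {
            heap.poll();
            heap.add(candidate);
            return worst;                // evicted element becomes the new spare
        }
        return candidate;                // candidate rejected, reuse it directly
    }

    public static void main(String[] args) {
        PriorityQueue<long[]> heap = new PriorityQueue<>(Comparator.comparingLong((long[] b) -> b[0]));
        long[] spare = null;
        for (long docCount : new long[] { 5, 1, 9, 3, 7 }) {
            long[] candidate = spare != null ? spare : new long[1]; // allocate only when needed
            candidate[0] = docCount;
            spare = insertWithOverflow(heap, 3, candidate);
        }
        while (heap.isEmpty() == false) {
            System.out.println(heap.poll()[0]); // 5, 7, 9
        }
    }
}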
@@ -501,8 +507,8 @@ void updateBucket(StringTerms.Bucket spare, BytesKeyedBucketOrds.BucketOrdsEnum } @Override - StringTerms.Bucket[][] buildTopBucketsPerOrd(int size) { - return new StringTerms.Bucket[size][]; + ObjectArray buildTopBucketsPerOrd(long size) { + return bigArrays().newObjectArray(size); } @Override @@ -521,7 +527,7 @@ void finalizeBucket(StringTerms.Bucket bucket) { } @Override - void buildSubAggs(StringTerms.Bucket[][] topBucketsPerOrd) throws IOException { + void buildSubAggs(ObjectArray topBucketsPerOrd) throws IOException { buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, a) -> b.aggregations = a); } @@ -637,8 +643,8 @@ void updateBucket(SignificantStringTerms.Bucket spare, BytesKeyedBucketOrds.Buck } @Override - SignificantStringTerms.Bucket[][] buildTopBucketsPerOrd(int size) { - return new SignificantStringTerms.Bucket[size][]; + ObjectArray buildTopBucketsPerOrd(long size) { + return bigArrays().newObjectArray(size); } @Override @@ -657,7 +663,7 @@ void finalizeBucket(SignificantStringTerms.Bucket bucket) { } @Override - void buildSubAggs(SignificantStringTerms.Bucket[][] topBucketsPerOrd) throws IOException { + void buildSubAggs(ObjectArray topBucketsPerOrd) throws IOException { buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, a) -> b.aggregations = a); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java index d39348d80df14..ce89b95b76a05 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java @@ -15,6 +15,7 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.common.util.ObjectArrayPriorityQueue; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -39,7 +40,6 @@ import java.io.IOException; import java.util.Arrays; -import java.util.List; import java.util.Map; import java.util.function.BiConsumer; import java.util.function.Function; @@ -136,7 +136,7 @@ private void collectValue(long val, int doc, long owningBucketOrd, LeafBucketCol } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { return resultStrategy.buildAggregations(owningBucketOrds); } @@ -163,48 +163,52 @@ public void collectDebugInfo(BiConsumer add) { abstract class ResultStrategy implements Releasable { - private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { - B[][] topBucketsPerOrd = buildTopBucketsPerOrd(owningBucketOrds.length); - long[] otherDocCounts = new long[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - collectZeroDocEntriesIfNeeded(owningBucketOrds[ordIdx], excludeDeletedDocs); - long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]); - - int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize()); - try (ObjectArrayPriorityQueue ordered = buildPriorityQueue(size)) { - B spare = null; - BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); - Supplier 
emptyBucketBuilder = emptyBucketBuilder(owningBucketOrds[ordIdx]); - while (ordsEnum.next()) { - long docCount = bucketDocCount(ordsEnum.ord()); - otherDocCounts[ordIdx] += docCount; - if (docCount < bucketCountThresholds.getShardMinDocCount()) { - continue; - } - if (spare == null) { - spare = emptyBucketBuilder.get(); + private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { + try ( + LongArray otherDocCounts = bigArrays().newLongArray(owningBucketOrds.size(), true); + ObjectArray topBucketsPerOrd = buildTopBucketsPerOrd(owningBucketOrds.size()) + ) { + for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) { + final long owningBucketOrd = owningBucketOrds.get(ordIdx); + collectZeroDocEntriesIfNeeded(owningBucketOrd, excludeDeletedDocs); + long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrd); + + int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize()); + try (ObjectArrayPriorityQueue ordered = buildPriorityQueue(size)) { + B spare = null; + BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd); + Supplier emptyBucketBuilder = emptyBucketBuilder(owningBucketOrd); + while (ordsEnum.next()) { + long docCount = bucketDocCount(ordsEnum.ord()); + otherDocCounts.increment(ordIdx, docCount); + if (docCount < bucketCountThresholds.getShardMinDocCount()) { + continue; + } + if (spare == null) { + checkRealMemoryCBForInternalBucket(); + spare = emptyBucketBuilder.get(); + } + updateBucket(spare, ordsEnum, docCount); + spare = ordered.insertWithOverflow(spare); } - updateBucket(spare, ordsEnum, docCount); - spare = ordered.insertWithOverflow(spare); - } - // Get the top buckets - B[] bucketsForOrd = buildBuckets((int) ordered.size()); - topBucketsPerOrd[ordIdx] = bucketsForOrd; - for (int b = (int) ordered.size() - 1; b >= 0; --b) { - topBucketsPerOrd[ordIdx][b] = ordered.pop(); - otherDocCounts[ordIdx] -= topBucketsPerOrd[ordIdx][b].getDocCount(); + // Get the top buckets + B[] bucketsForOrd = buildBuckets((int) ordered.size()); + topBucketsPerOrd.set(ordIdx, bucketsForOrd); + for (int b = (int) ordered.size() - 1; b >= 0; --b) { + topBucketsPerOrd.get(ordIdx)[b] = ordered.pop(); + otherDocCounts.increment(ordIdx, -topBucketsPerOrd.get(ordIdx)[b].getDocCount()); + } } } - } - buildSubAggs(topBucketsPerOrd); + buildSubAggs(topBucketsPerOrd); - InternalAggregation[] result = new InternalAggregation[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - result[ordIdx] = buildResult(owningBucketOrds[ordIdx], otherDocCounts[ordIdx], topBucketsPerOrd[ordIdx]); + return NumericTermsAggregator.this.buildAggregations( + Math.toIntExact(owningBucketOrds.size()), + ordIdx -> buildResult(owningBucketOrds.get(ordIdx), otherDocCounts.get(ordIdx), topBucketsPerOrd.get(ordIdx)) + ); } - return result; } /** @@ -227,7 +231,7 @@ private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws /** * Build an array to hold the "top" buckets for each ordinal. */ - abstract B[][] buildTopBucketsPerOrd(int size); + abstract ObjectArray buildTopBucketsPerOrd(long size); /** * Build an array of buckets for a particular ordinal. These arrays @@ -258,7 +262,7 @@ private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws * Build the sub-aggregations into the buckets. This will usually * delegate to {@link #buildSubAggsForAllBuckets}. 
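Result construction now funnels through a buildAggregations(count, fn) style helper on the aggregator base class instead of hand-filled arrays, which centralizes the per-ordinal loop in one place. The helper's exact signature is not shown in these hunks, so the sketch below assumes a simple IntFunction and unchecked exceptions:

import java.util.function.IntFunction;

final class BuildLoopSketch {
    interface InternalAgg {} // stand-in for InternalAggregation

    static InternalAgg[] buildAggregations(int count, IntFunction<InternalAgg> resultForOrd) {
        InternalAgg[] results = new InternalAgg[count];
        for (int ordIdx = 0; ordIdx < count; ordIdx++) {
            results[ordIdx] = resultForOrd.apply(ordIdx); // one central loop shared by every aggregator
        }
        return results;
    }
}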
*/ - abstract void buildSubAggs(B[][] topBucketsPerOrd) throws IOException; + abstract void buildSubAggs(ObjectArray topBucketsPerOrd) throws IOException; /** * Collect extra entries for "zero" hit documents if they were requested @@ -297,7 +301,7 @@ final ObjectArrayPriorityQueue buildPriorityQueue(int size) { } @Override - final void buildSubAggs(B[][] topBucketsPerOrd) throws IOException { + final void buildSubAggs(ObjectArray topBucketsPerOrd) throws IOException { buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs); } @@ -356,8 +360,8 @@ SortedNumericDocValues getValues(LeafReaderContext ctx) throws IOException { } @Override - LongTerms.Bucket[][] buildTopBucketsPerOrd(int size) { - return new LongTerms.Bucket[size][]; + ObjectArray buildTopBucketsPerOrd(long size) { + return bigArrays().newObjectArray(size); } @Override @@ -397,7 +401,7 @@ LongTerms buildResult(long owningBucketOrd, long otherDocCount, LongTerms.Bucket bucketCountThresholds.getShardSize(), showTermDocCountError, otherDocCount, - List.of(topBuckets), + Arrays.asList(topBuckets), null ); } @@ -438,8 +442,8 @@ SortedNumericDocValues getValues(LeafReaderContext ctx) throws IOException { } @Override - DoubleTerms.Bucket[][] buildTopBucketsPerOrd(int size) { - return new DoubleTerms.Bucket[size][]; + ObjectArray buildTopBucketsPerOrd(long size) { + return bigArrays().newObjectArray(size); } @Override @@ -479,7 +483,7 @@ DoubleTerms buildResult(long owningBucketOrd, long otherDocCount, DoubleTerms.Bu bucketCountThresholds.getShardSize(), showTermDocCountError, otherDocCount, - List.of(topBuckets), + Arrays.asList(topBuckets), null ); } @@ -551,8 +555,8 @@ public void collect(int doc, long owningBucketOrd) throws IOException { } @Override - SignificantLongTerms.Bucket[][] buildTopBucketsPerOrd(int size) { - return new SignificantLongTerms.Bucket[size][]; + ObjectArray buildTopBucketsPerOrd(long size) { + return bigArrays().newObjectArray(size); } @Override @@ -583,7 +587,7 @@ ObjectArrayPriorityQueue buildPriorityQueue(int siz } @Override - void buildSubAggs(SignificantLongTerms.Bucket[][] topBucketsPerOrd) throws IOException { + void buildSubAggs(ObjectArray topBucketsPerOrd) throws IOException { buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs); } @@ -601,7 +605,7 @@ SignificantLongTerms buildResult(long owningBucketOrd, long otherDocCoun, Signif subsetSizes.get(owningBucketOrd), supersetSize, significanceHeuristic, - List.of(topBuckets) + Arrays.asList(topBuckets) ); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java index 2bc2833f0ddce..8a2c9d52f4212 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java @@ -12,6 +12,8 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.common.util.BytesRefHash; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.common.util.SetBackedScalingCuckooFilter; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.fielddata.FieldData; @@ -119,72 +121,74 @@ private void collectValue(BytesRef val, int doc, long 
owningBucketOrd, LeafBucke } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { /* * Collect the list of buckets, populate the filter with terms * that are too frequent, and figure out how to merge sub-buckets. */ - StringRareTerms.Bucket[][] rarestPerOrd = new StringRareTerms.Bucket[owningBucketOrds.length][]; - SetBackedScalingCuckooFilter[] filters = new SetBackedScalingCuckooFilter[owningBucketOrds.length]; - long keepCount = 0; - long[] mergeMap = new long[(int) bucketOrds.size()]; - Arrays.fill(mergeMap, -1); - long offset = 0; - for (int owningOrdIdx = 0; owningOrdIdx < owningBucketOrds.length; owningOrdIdx++) { - try (BytesRefHash bucketsInThisOwningBucketToCollect = new BytesRefHash(1, bigArrays())) { - filters[owningOrdIdx] = newFilter(); - List builtBuckets = new ArrayList<>(); - BytesKeyedBucketOrds.BucketOrdsEnum collectedBuckets = bucketOrds.ordsEnum(owningBucketOrds[owningOrdIdx]); - BytesRef scratch = new BytesRef(); - while (collectedBuckets.next()) { - collectedBuckets.readValue(scratch); - long docCount = bucketDocCount(collectedBuckets.ord()); - // if the key is below threshold, reinsert into the new ords - if (docCount <= maxDocCount) { - StringRareTerms.Bucket bucket = new StringRareTerms.Bucket(BytesRef.deepCopyOf(scratch), docCount, null, format); - bucket.bucketOrd = offset + bucketsInThisOwningBucketToCollect.add(scratch); - mergeMap[(int) collectedBuckets.ord()] = bucket.bucketOrd; - builtBuckets.add(bucket); - keepCount++; - } else { - filters[owningOrdIdx].add(scratch); + try ( + ObjectArray rarestPerOrd = bigArrays().newObjectArray(owningBucketOrds.size()); + ObjectArray filters = bigArrays().newObjectArray(owningBucketOrds.size()) + ) { + try (LongArray mergeMap = bigArrays().newLongArray(bucketOrds.size())) { + mergeMap.fill(0, mergeMap.size(), -1); + long keepCount = 0; + long offset = 0; + for (long owningOrdIdx = 0; owningOrdIdx < owningBucketOrds.size(); owningOrdIdx++) { + try (BytesRefHash bucketsInThisOwningBucketToCollect = new BytesRefHash(1, bigArrays())) { + filters.set(owningOrdIdx, newFilter()); + List builtBuckets = new ArrayList<>(); + BytesKeyedBucketOrds.BucketOrdsEnum collectedBuckets = bucketOrds.ordsEnum(owningBucketOrds.get(owningOrdIdx)); + BytesRef scratch = new BytesRef(); + while (collectedBuckets.next()) { + collectedBuckets.readValue(scratch); + long docCount = bucketDocCount(collectedBuckets.ord()); + // if the key is below threshold, reinsert into the new ords + if (docCount <= maxDocCount) { + checkRealMemoryCBForInternalBucket(); + StringRareTerms.Bucket bucket = new StringRareTerms.Bucket( + BytesRef.deepCopyOf(scratch), + docCount, + null, + format + ); + bucket.bucketOrd = offset + bucketsInThisOwningBucketToCollect.add(scratch); + mergeMap.set(collectedBuckets.ord(), bucket.bucketOrd); + builtBuckets.add(bucket); + keepCount++; + } else { + filters.get(owningOrdIdx).add(scratch); + } + } + rarestPerOrd.set(owningOrdIdx, builtBuckets.toArray(StringRareTerms.Bucket[]::new)); + offset += bucketsInThisOwningBucketToCollect.size(); } } - rarestPerOrd[owningOrdIdx] = builtBuckets.toArray(StringRareTerms.Bucket[]::new); - offset += bucketsInThisOwningBucketToCollect.size(); - } - } - /* - * Only merge/delete the ordinals if we have actually deleted one, - * to save on some redundant work. 
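Rare terms cannot be confirmed rare until every shard reports, so terms that are locally too frequent never become buckets; they are folded into a SetBackedScalingCuckooFilter instead, and at reduce time a candidate bucket from another shard can be discarded when some shard's filter may contain it. A rough control-flow sketch, with a HashSet standing in for the approximate, self-scaling filter:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

final class RareSplitSketch {
    public static void main(String[] args) {
        Map<String, Long> counts = Map.of("aardvark", 1L, "the", 50_000L, "zyzzyva", 2L);
        long maxDocCount = 10;
        List<String> rareCandidates = new ArrayList<>();
        Set<String> tooFrequent = new HashSet<>(); // stand-in for SetBackedScalingCuckooFilter
        for (Map.Entry<String, Long> e : counts.entrySet()) {
            if (e.getValue() <= maxDocCount) {
                rareCandidates.add(e.getKey());    // locally rare: becomes a shard-level bucket
            } else {
                tooFrequent.add(e.getKey());       // remembered (approximately), never a bucket
            }
        }
        System.out.println("candidates=" + rareCandidates + " filtered=" + tooFrequent);
    }
}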
- */ - if (keepCount != mergeMap.length) { - LongUnaryOperator howToMerge = b -> mergeMap[(int) b]; - rewriteBuckets(offset, howToMerge); - if (deferringCollector() != null) { - ((BestBucketsDeferringCollector) deferringCollector()).rewriteBuckets(howToMerge); + /* + * Only merge/delete the ordinals if we have actually deleted one, + * to save on some redundant work. + */ + if (keepCount != mergeMap.size()) { + LongUnaryOperator howToMerge = mergeMap::get; + rewriteBuckets(offset, howToMerge); + if (deferringCollector() != null) { + ((BestBucketsDeferringCollector) deferringCollector()).rewriteBuckets(howToMerge); + } + } } - } - /* - * Now build the results! - */ - buildSubAggsForAllBuckets(rarestPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs); - InternalAggregation[] result = new InternalAggregation[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - Arrays.sort(rarestPerOrd[ordIdx], ORDER.comparator()); - result[ordIdx] = new StringRareTerms( - name, - ORDER, - metadata(), - format, - Arrays.asList(rarestPerOrd[ordIdx]), - maxDocCount, - filters[ordIdx] - ); + /* + * Now build the results! + */ + buildSubAggsForAllBuckets(rarestPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs); + + return StringRareTermsAggregator.this.buildAggregations(Math.toIntExact(owningBucketOrds.size()), ordIdx -> { + StringRareTerms.Bucket[] buckets = rarestPerOrd.get(ordIdx); + Arrays.sort(buckets, ORDER.comparator()); + return new StringRareTerms(name, ORDER, metadata(), format, Arrays.asList(buckets), maxDocCount, filters.get(ordIdx)); + }); } - return result; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java index 5faf6e0aaaedf..2370827230c47 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java @@ -184,15 +184,15 @@ public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) prototype.termBytes, prototype.docCount, aggregations, - prototype.showDocCountError, - prototype.docCountError, + showTermDocCountError, + prototype.getDocCountError(), prototype.format ); } @Override protected Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, StringTerms.Bucket prototype) { - return new Bucket(prototype.termBytes, docCount, aggs, prototype.showDocCountError, docCountError, format); + return new Bucket(prototype.termBytes, docCount, aggs, showTermDocCountError, docCountError, format); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java index 7755f1db6a3ee..e82a2b7fe9235 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java @@ -111,7 +111,12 @@ public boolean canLeadReduction() { @Override public final XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - return doXContentCommon(builder, params, 0L, 0, Collections.emptyList()); + return doXContentCommon(builder, params, false, 0L, 0, Collections.emptyList()); + } + + @Override + protected boolean getShowDocCountError() 
{ + return false; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregator.java index 8742136c86ec6..cf65f1ff7c835 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricsAggregator.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorBase; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -36,11 +37,7 @@ protected MetricsAggregator(String name, AggregationContext context, Aggregator public abstract InternalAggregation buildAggregation(long owningBucketOrd) throws IOException; @Override - public final InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { - InternalAggregation[] results = new InternalAggregation[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - results[ordIdx] = buildAggregation(owningBucketOrds[ordIdx]); - } - return results; + public final InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { + return buildAggregations(Math.toIntExact(owningBucketOrds.size()), ordIdx -> buildAggregation(owningBucketOrds.get(ordIdx))); } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 6bc667d4359b1..546586a9ff3c3 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -191,7 +191,16 @@ protected SearchHit nextDoc(int doc) throws IOException { } }; - SearchHit[] hits = docsIterator.iterate(context.shardTarget(), context.searcher().getIndexReader(), docIdsToLoad); + SearchHit[] hits = docsIterator.iterate( + context.shardTarget(), + context.searcher().getIndexReader(), + docIdsToLoad, + context.request().allowPartialSearchResults() + ); + + if (docsIterator.isTimedOut()) { + context.queryResult().searchTimedOut(true); + } if (context.isCancelled()) { for (SearchHit hit : hits) { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java index 682ee4b375668..df4e7649ffd3b 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java @@ -13,7 +13,10 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.internal.ContextIndexSearcher; +import org.elasticsearch.search.query.SearchTimeoutException; import java.io.IOException; import java.util.Arrays; @@ -27,6 +30,12 @@ */ abstract class FetchPhaseDocsIterator { + private boolean timedOut = false; + + public boolean isTimedOut() { + return timedOut; + } + /** * Called when a new leaf reader is reached * @param ctx the leaf reader for this set of doc ids @@ -44,7 +53,7 @@ abstract class FetchPhaseDocsIterator { /** * Iterate 
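The fetch phase now distinguishes two timeout outcomes: when partial results are allowed, the iterator records that it timed out and returns only the hits fetched so far (an empty array if even the first reader setup timed out); otherwise it releases the collected hits and throws SearchTimeoutException. A standalone sketch of the keep-the-prefix pattern, with a hypothetical TimeExceededException in place of the searcher's internal one:

final class PartialFetchSketch {
    static class TimeExceededException extends RuntimeException {} // stand-in for the searcher's internal type

    private boolean timedOut = false;

    boolean isTimedOut() {
        return timedOut;
    }

    String[] fetchAll(int[] docIds, boolean allowPartialResults) {
        String[] hits = new String[docIds.length];
        for (int i = 0; i < docIds.length; i++) {
            try {
                hits[i] = fetchOne(docIds[i]);
            } catch (TimeExceededException e) {
                if (allowPartialResults) {
                    timedOut = true;                          // caller marks the query result as timed out
                    String[] partial = new String[i];
                    System.arraycopy(hits, 0, partial, 0, i); // keep only the completed prefix
                    return partial;
                }
                throw new RuntimeException("Time exceeded");  // the real code throws SearchTimeoutException
            }
        }
        return hits;
    }

    private String fetchOne(int docId) {
        return "doc-" + docId; // placeholder for nextDoc(doc), which may time out mid-iteration
    }
}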
over a set of docsIds within a particular shard and index reader */ - public final SearchHit[] iterate(SearchShardTarget shardTarget, IndexReader indexReader, int[] docIds) { + public final SearchHit[] iterate(SearchShardTarget shardTarget, IndexReader indexReader, int[] docIds, boolean allowPartialResults) { SearchHit[] searchHits = new SearchHit[docIds.length]; DocIdToIndex[] docs = new DocIdToIndex[docIds.length]; for (int index = 0; index < docIds.length; index++) { @@ -58,30 +67,55 @@ public final SearchHit[] iterate(SearchShardTarget shardTarget, IndexReader inde LeafReaderContext ctx = indexReader.leaves().get(leafOrd); int endReaderIdx = endReaderIdx(ctx, 0, docs); int[] docsInLeaf = docIdsInLeaf(0, endReaderIdx, docs, ctx.docBase); - setNextReader(ctx, docsInLeaf); - for (int i = 0; i < docs.length; i++) { - if (i >= endReaderIdx) { - leafOrd = ReaderUtil.subIndex(docs[i].docId, indexReader.leaves()); - ctx = indexReader.leaves().get(leafOrd); - endReaderIdx = endReaderIdx(ctx, i, docs); - docsInLeaf = docIdsInLeaf(i, endReaderIdx, docs, ctx.docBase); - setNextReader(ctx, docsInLeaf); + try { + setNextReader(ctx, docsInLeaf); + } catch (ContextIndexSearcher.TimeExceededException timeExceededException) { + if (allowPartialResults) { + timedOut = true; + return SearchHits.EMPTY; } - currentDoc = docs[i].docId; - assert searchHits[docs[i].index] == null; - searchHits[docs[i].index] = nextDoc(docs[i].docId); + throw new SearchTimeoutException(shardTarget, "Time exceeded"); } - } catch (Exception e) { - for (SearchHit searchHit : searchHits) { - if (searchHit != null) { - searchHit.decRef(); + for (int i = 0; i < docs.length; i++) { + try { + if (i >= endReaderIdx) { + leafOrd = ReaderUtil.subIndex(docs[i].docId, indexReader.leaves()); + ctx = indexReader.leaves().get(leafOrd); + endReaderIdx = endReaderIdx(ctx, i, docs); + docsInLeaf = docIdsInLeaf(i, endReaderIdx, docs, ctx.docBase); + setNextReader(ctx, docsInLeaf); + } + currentDoc = docs[i].docId; + assert searchHits[docs[i].index] == null; + searchHits[docs[i].index] = nextDoc(docs[i].docId); + } catch (ContextIndexSearcher.TimeExceededException timeExceededException) { + if (allowPartialResults) { + timedOut = true; + SearchHit[] partialSearchHits = new SearchHit[i]; + System.arraycopy(searchHits, 0, partialSearchHits, 0, i); + return partialSearchHits; + } + purgeSearchHits(searchHits); + throw new SearchTimeoutException(shardTarget, "Time exceeded"); } } + } catch (SearchTimeoutException e) { + throw e; + } catch (Exception e) { + purgeSearchHits(searchHits); throw new FetchPhaseExecutionException(shardTarget, "Error running fetch phase for doc [" + currentDoc + "]", e); } return searchHits; } + private static void purgeSearchHits(SearchHit[] searchHits) { + for (SearchHit searchHit : searchHits) { + if (searchHit != null) { + searchHit.decRef(); + } + } + } + private static int endReaderIdx(LeafReaderContext currentReaderContext, int index, DocIdToIndex[] docs) { int firstInNextReader = currentReaderContext.docBase + currentReaderContext.reader().maxDoc(); int i = index + 1; diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java index e14177adba467..a8db0f26d2966 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java @@ -64,7 +64,6 @@ public abstract class AbstractHighlighterBuilder> BiFunction {}, FORCE_SOURCE_FIELD); // force_source is ignored parser.declareInt(HB::phraseLimit, PHRASE_LIMIT_FIELD); parser.declareInt(HB::maxAnalyzedOffset, MAX_ANALYZED_OFFSET_FIELD); parser.declareObject(HB::options, (XContentParser p, Void c) -> { diff --git a/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java b/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java index fff1990c29750..90e84acc7cad5 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java @@ -10,6 +10,7 @@ package org.elasticsearch.search.profile.aggregation; import org.apache.lucene.search.ScoreMode; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.search.aggregations.AggregationExecutionContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -68,7 +69,7 @@ public BucketComparator bucketComparator(String key, SortOrder order) { } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { Timer timer = profileBreakdown.getNewTimer(AggregationTimingType.BUILD_AGGREGATION); InternalAggregation[] result; timer.start(); diff --git a/server/src/main/java/org/elasticsearch/search/rank/feature/RankFeatureShardPhase.java b/server/src/main/java/org/elasticsearch/search/rank/feature/RankFeatureShardPhase.java index 68463eecfb11d..e64bbe3c39d79 100644 --- a/server/src/main/java/org/elasticsearch/search/rank/feature/RankFeatureShardPhase.java +++ b/server/src/main/java/org/elasticsearch/search/rank/feature/RankFeatureShardPhase.java @@ -35,9 +35,9 @@ public final class RankFeatureShardPhase { public static final RankFeatureShardResult EMPTY_RESULT = new RankFeatureShardResult(new RankFeatureDoc[0]); - public RankFeatureShardPhase() {} + private RankFeatureShardPhase() {} - public void prepareForFetch(SearchContext searchContext, RankFeatureShardRequest request) { + public static void prepareForFetch(SearchContext searchContext, RankFeatureShardRequest request) { if (logger.isTraceEnabled()) { logger.trace("{}", new SearchContextSourcePrinter(searchContext)); } @@ -58,7 +58,7 @@ public void prepareForFetch(SearchContext searchContext, RankFeatureShardRequest } } - public void processFetch(SearchContext searchContext) { + public static void processFetch(SearchContext searchContext) { if (logger.isTraceEnabled()) { logger.trace("{}", new SearchContextSourcePrinter(searchContext)); } @@ -92,7 +92,7 @@ public void processFetch(SearchContext searchContext) { } } - private RankFeaturePhaseRankShardContext shardContext(SearchContext searchContext) { + private static RankFeaturePhaseRankShardContext shardContext(SearchContext searchContext) { return searchContext.request().source() != null && searchContext.request().source().rankBuilder() != null ? 
searchContext.request().source().rankBuilder().buildRankFeaturePhaseShardContext() : null; diff --git a/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java index 0ac3b42dd5b10..5832b93b9462f 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java @@ -158,6 +158,11 @@ private static void parseCompoundSortField(XContentParser parser, List buildSort(List> sortBuilders, SearchExecutionContext context) throws IOException { + return buildSort(sortBuilders, context, true); + } + + public static Optional buildSort(List> sortBuilders, SearchExecutionContext context, boolean optimize) + throws IOException { List sortFields = new ArrayList<>(sortBuilders.size()); List sortFormats = new ArrayList<>(sortBuilders.size()); for (SortBuilder builder : sortBuilders) { @@ -172,9 +177,13 @@ public static Optional buildSort(List> sortBuilde if (sortFields.size() > 1) { sort = true; } else { - SortField sortField = sortFields.get(0); - if (sortField.getType() == SortField.Type.SCORE && sortField.getReverse() == false) { - sort = false; + if (optimize) { + SortField sortField = sortFields.get(0); + if (sortField.getType() == SortField.Type.SCORE && sortField.getReverse() == false) { + sort = false; + } else { + sort = true; + } } else { sort = true; } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 7b2066f243771..234c0239a68ce 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -425,9 +425,9 @@ public void onResponse(ShardSnapshotResult shardSnapshotResult) { final ShardGeneration newGeneration = shardSnapshotResult.getGeneration(); assert newGeneration != null; assert newGeneration.equals(snapshotStatus.generation()); - if (logger.isDebugEnabled()) { + if (logger.isTraceEnabled()) { final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); - logger.debug( + logger.trace( "[{}][{}] completed snapshot to [{}] with status [{}] at generation [{}]", shardId, snapshot, diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index cc5e96327b241..f55e3740aaa8f 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -1062,13 +1062,6 @@ public static boolean assertCurrentThreadPool(String... 
permittedThreadPoolNames
         return true;
     }
 
-    public static boolean assertTestThreadPool() {
-        final var threadName = Thread.currentThread().getName();
-        final var executorName = EsExecutors.executorName(threadName);
-        assert threadName.startsWith("TEST-") || threadName.startsWith("LuceneTestCase") : threadName + " is not a test thread";
-        return true;
-    }
-
     public static boolean assertInSystemContext(ThreadPool threadPool) {
         final var threadName = Thread.currentThread().getName();
         assert threadName.startsWith("TEST-") || threadName.startsWith("LuceneTestCase") || threadPool.getThreadContext().isSystemContext()
diff --git a/server/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java b/server/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java
index 7bf172388eccd..0db3de9abdb7b 100644
--- a/server/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java
+++ b/server/src/main/java/org/elasticsearch/transport/BytesTransportRequest.java
@@ -14,7 +14,6 @@
 import org.elasticsearch.common.bytes.ReleasableBytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.core.RefCounted;
 
 import java.io.IOException;
 
@@ -22,7 +21,7 @@
  * A specialized, bytes only request, that can potentially be optimized on the network
  * layer, specifically for the same large buffer send to several nodes.
  */
-public class BytesTransportRequest extends TransportRequest implements RefCounted {
+public class BytesTransportRequest extends TransportRequest {
 
     final ReleasableBytesReference bytes;
     private final TransportVersion version;
diff --git a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification
index 5cd8935f72403..12965152f260c 100644
--- a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification
+++ b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification
@@ -7,13 +7,12 @@
 # License v3.0 only", or the "Server Side Public License, v 1".
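The file edited here is a java.util.ServiceLoader registration list: each non-comment line names a FeatureSpecification implementation that the server discovers at startup, which is why adding or retiring a feature set is a one-line change to this file. As a rough illustration of that discovery mechanism, with a stand-in interface rather than the real org.elasticsearch.features.FeatureSpecification:

import java.util.ServiceLoader;

public final class FeatureDiscoverySketch {
    // Stand-in for org.elasticsearch.features.FeatureSpecification.
    public interface FeatureSpecification {}

    public static void main(String[] args) {
        // ServiceLoader reads META-INF/services/<interface-binary-name> from the
        // classpath and instantiates every implementation listed there.
        for (FeatureSpecification spec : ServiceLoader.load(FeatureSpecification.class)) {
            System.out.println(spec.getClass().getName());
        }
    }
}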
 #
+org.elasticsearch.action.admin.indices.stats.IndicesStatsFeatures
 org.elasticsearch.action.bulk.BulkFeatures
 org.elasticsearch.features.FeatureInfrastructureFeatures
 org.elasticsearch.health.HealthFeatures
-org.elasticsearch.cluster.service.TransportFeatures
 org.elasticsearch.cluster.metadata.MetadataFeatures
 org.elasticsearch.rest.RestFeatures
-org.elasticsearch.indices.IndicesFeatures
 org.elasticsearch.repositories.RepositoriesFeatures
 org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures
 org.elasticsearch.rest.action.admin.cluster.ClusterRerouteFeatures
@@ -24,3 +23,4 @@ org.elasticsearch.search.retriever.RetrieversFeatures
 org.elasticsearch.script.ScriptFeatures
 org.elasticsearch.reservedstate.service.FileSettingsFeatures
 org.elasticsearch.cluster.routing.RoutingFeatures
+org.elasticsearch.action.admin.cluster.stats.ClusterStatsFeatures
diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv
index ba575cc642a81..faeb7fe848159 100644
--- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv
+++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv
@@ -132,4 +132,6 @@
 8.15.2,8702003
 8.15.3,8702003
 8.15.4,8702003
+8.15.5,8702003
 8.16.0,8772001
+8.16.1,8772004
diff --git a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.txt b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.txt
index f9a8237d63717..69aa5102dec8d 100644
--- a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.txt
+++ b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.txt
@@ -44,3 +44,4 @@ FORMING_SINGLE_NODE_CLUSTERS modules-discover
 CIRCUIT_BREAKER_ERRORS circuit-breaker-errors.html
 ALLOCATION_EXPLAIN_NO_COPIES cluster-allocation-explain.html#no-valid-shard-copy
 ALLOCATION_EXPLAIN_MAX_RETRY cluster-allocation-explain.html#maximum-number-of-retries-exceeded
+SECURE_SETTINGS secure-settings.html
diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv
index c54aea88613f5..1fc8bd8648ad6 100644
--- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv
+++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv
@@ -132,4 +132,6 @@
 8.15.2,8512000
 8.15.3,8512000
 8.15.4,8512000
+8.15.5,8512000
 8.16.0,8518000
+8.16.1,8518000
diff --git a/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java b/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java
index 9863d2156422d..f5a23cf68a26e 100644
--- a/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java
+++ b/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java
@@ -29,6 +29,7 @@
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.util.Maps;
 import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.core.RestApiVersion;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexNotFoundException;
@@ -509,12 +510,12 @@ public void testGetDetailedMessage() {
 
     public void testToXContent() throws IOException {
         {
             ElasticsearchException e = new ElasticsearchException("test");
-            assertExceptionAsJson(e, """
+            assertThrowableAsJson(e, """
                 {"type":"exception","reason":"test"}""");
         }
         {
             ElasticsearchException e = new IndexShardRecoveringException(new
ShardId("_test", "_0", 5)); - assertExceptionAsJson(e, """ + assertThrowableAsJson(e, """ { "type": "index_shard_recovering_exception", "reason": "CurrentState[RECOVERING] Already recovering", @@ -529,7 +530,7 @@ public void testToXContent() throws IOException { "foo", new IllegalStateException("bar") ); - assertExceptionAsJson(e, """ + assertThrowableAsJson(e, """ { "type": "illegal_state_exception", "reason": "bar" @@ -537,7 +538,7 @@ public void testToXContent() throws IOException { } { ElasticsearchException e = new ElasticsearchException(new IllegalArgumentException("foo")); - assertExceptionAsJson(e, """ + assertThrowableAsJson(e, """ { "type": "exception", "reason": "java.lang.IllegalArgumentException: foo", @@ -552,7 +553,7 @@ public void testToXContent() throws IOException { "foo", new ElasticsearchException("bar", new IllegalArgumentException("index is closed", new RuntimeException("foobar"))) ); - assertExceptionAsJson(ex, """ + assertThrowableAsJson(ex, """ { "type": "exception", "reason": "foo", @@ -573,7 +574,7 @@ public void testToXContent() throws IOException { { ElasticsearchException e = new ElasticsearchException("foo", new IllegalStateException("bar")); - assertExceptionAsJson(e, """ + assertThrowableAsJson(e, """ { "type": "exception", "reason": "foo", @@ -602,21 +603,91 @@ public void testToXContent() throws IOException { } } + public void testGenerateFailureToXContentWithNoDetails() throws IOException { + { + Exception ex = new FileNotFoundException("foo not found"); + for (int i = 0; i < randomInt(10); i++) { + ex = new RemoteTransportException("foobar", ex); + } + assertFailureAsJson(ex, """ + {"error":{"type":"file_not_found_exception","reason":"foo not found"}}""", false); + } + { + ParsingException ex = new ParsingException(1, 2, "foobar", null); + assertFailureAsJson(ex, """ + {"error":{"type":"parsing_exception","reason":"foobar"}}""", false); + } + + { // header and metadata shouldn't be rendered + ParsingException ex = new ParsingException(1, 2, "foobar", null); + ex.addMetadata("es.test1", "value1"); + ex.addMetadata("es.test2", "value2"); + ex.addHeader("test", "some value"); + ex.addHeader("test_multi", "some value", "another value"); + + String expected = """ + {"error":{"type": "parsing_exception","reason": "foobar"}}"""; + assertFailureAsJson(ex, expected, false); + } + } + + public void testGenerateFailureToXContentWithDetails() throws IOException { + { + Exception ex = new FileNotFoundException("foo not found"); + for (int i = 0; i < randomInt(10); i++) { + ex = new RemoteTransportException("foobar", ex); + } + assertFailureAsJson(ex, """ + {"error":{"type":"file_not_found_exception","reason":"foo not found", + "root_cause":[{"type":"file_not_found_exception","reason":"foo not found"}]}}""", true); + } + { + ParsingException ex = new ParsingException(1, 2, "foobar", null); + assertFailureAsJson(ex, """ + {"error":{"type":"parsing_exception","reason":"foobar","line":1,"col":2, + "root_cause":[{"type":"parsing_exception","reason":"foobar","line":1,"col":2}]}}""", true); + } + + { // render header and metadata + ParsingException ex = new ParsingException(1, 2, "foobar", null); + ex.addMetadata("es.test1", "value1"); + ex.addMetadata("es.test2", "value2"); + ex.addHeader("test", "some value"); + ex.addHeader("test_multi", "some value", "another value"); + + String expectedFragment = """ + { + "type": "parsing_exception", + "reason": "foobar", + "line": 1, + "col": 2, + "test1": "value1", + "test2": "value2", + "header": { + "test_multi": [ + "some 
value", + "another value" + ], + "test": "some value" + } + """; + String expected = "{\"error\":" + expectedFragment + ",\"root_cause\":[" + expectedFragment + "}]}}"; + assertFailureAsJson(ex, expected, true); + } + } + public void testGenerateThrowableToXContent() throws IOException { { - Exception ex; - if (randomBoolean()) { - // just a wrapper which is omitted - ex = new RemoteTransportException("foobar", new FileNotFoundException("foo not found")); - } else { - ex = new FileNotFoundException("foo not found"); + Exception ex = new FileNotFoundException("foo not found"); + for (int i = 0; i < randomInt(10); i++) { + ex = new RemoteTransportException("foobar", ex); } - assertExceptionAsJson(ex, """ + assertThrowableAsJson(ex, """ {"type":"file_not_found_exception","reason":"foo not found"}"""); } { ParsingException ex = new ParsingException(1, 2, "foobar", null); - assertExceptionAsJson(ex, """ + assertThrowableAsJson(ex, """ {"type":"parsing_exception","reason":"foobar","line":1,"col":2}"""); } @@ -656,7 +727,7 @@ public void testGenerateThrowableToXContent() throws IOException { "test": "some value" } }"""; - assertExceptionAsJson(ex, expected); + assertThrowableAsJson(ex, expected); } } @@ -697,7 +768,7 @@ public void testToXContentWithHeadersAndMetadata() throws IOException { } }"""; - assertExceptionAsJson(e, expectedJson); + assertThrowableAsJson(e, expectedJson); ElasticsearchException parsed; try (XContentParser parser = createParser(XContentType.JSON.xContent(), expectedJson)) { @@ -859,7 +930,7 @@ public void testFromXContentWithHeadersAndMetadata() throws IOException { } assertNotNull(parsed); - assertEquals(parsed.getMessage(), "Elasticsearch exception [type=exception, reason=foo]"); + assertEquals("Elasticsearch exception [type=exception, reason=foo]", parsed.getMessage()); assertThat(parsed.getHeaderKeys(), hasSize(1)); assertThat(parsed.getHeader("foo_1"), hasItem("foo1")); assertThat(parsed.getMetadataKeys(), hasSize(1)); @@ -996,11 +1067,40 @@ public void testThrowableToAndFromXContent() throws IOException { public void testUnknownFailureToAndFromXContent() throws IOException { final XContent xContent = randomFrom(XContentType.values()).xContent(); - BytesReference failureBytes = toShuffledXContent((builder, params) -> { - // Prints a null failure using generateFailureXContent() - ElasticsearchException.generateFailureXContent(builder, params, null, randomBoolean()); - return builder; - }, xContent.type(), ToXContent.EMPTY_PARAMS, randomBoolean()); + // Prints a null failure using generateFailureXContent() + BytesReference failureBytes = toShuffledXContent( + (builder, params) -> ElasticsearchException.generateFailureXContent(builder, params, null, randomBoolean()), + xContent.type(), + ToXContent.EMPTY_PARAMS, + randomBoolean() + ); + + ElasticsearchException parsedFailure; + try (XContentParser parser = createParser(xContent, failureBytes)) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + parsedFailure = ElasticsearchException.failureFromXContent(parser); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + assertNull(parser.nextToken()); + } + + // Failure was null, expecting a "unknown" reason + assertEquals("Elasticsearch exception [type=unknown, reason=unknown]", parsedFailure.getMessage()); + assertEquals(0, parsedFailure.getHeaders().size()); + assertEquals(0, parsedFailure.getMetadata().size()); + } + + public void 
testUnknownFailureToAndFromXContentV8() throws IOException { + final XContent xContent = randomFrom(XContentType.values()).xContent(); + + // Prints a null failure using generateFailureXContent() + BytesReference failureBytes = toShuffledXContent( + (builder, params) -> ElasticsearchException.generateFailureXContent(builder, params, null, randomBoolean()), + xContent.type(), + RestApiVersion.V_8, + ToXContent.EMPTY_PARAMS, + randomBoolean() + ); ElasticsearchException parsedFailure; try (XContentParser parser = createParser(xContent, failureBytes)) { @@ -1021,10 +1121,46 @@ public void testFailureToAndFromXContentWithNoDetails() throws IOException { final XContent xContent = randomFrom(XContentType.values()).xContent(); final Exception failure = (Exception) randomExceptions().v1(); - BytesReference failureBytes = toShuffledXContent((builder, params) -> { - ElasticsearchException.generateFailureXContent(builder, params, failure, false); - return builder; - }, xContent.type(), ToXContent.EMPTY_PARAMS, randomBoolean()); + BytesReference failureBytes = toShuffledXContent( + (builder, params) -> ElasticsearchException.generateFailureXContent(builder, params, failure, false), + xContent.type(), + ToXContent.EMPTY_PARAMS, + randomBoolean() + ); + + try (XContentParser parser = createParser(xContent, failureBytes)) { + failureBytes = BytesReference.bytes(shuffleXContent(parser, randomBoolean())); + } + + ElasticsearchException parsedFailure; + try (XContentParser parser = createParser(xContent, failureBytes)) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + parsedFailure = ElasticsearchException.failureFromXContent(parser); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + assertNull(parser.nextToken()); + } + assertNotNull(parsedFailure); + + String type = ElasticsearchException.getExceptionName(failure); + String reason = failure.getMessage(); + assertEquals(ElasticsearchException.buildMessage(type, reason, null), parsedFailure.getMessage()); + assertEquals(0, parsedFailure.getHeaders().size()); + assertEquals(0, parsedFailure.getMetadata().size()); + assertNull(parsedFailure.getCause()); + } + + public void testFailureToAndFromXContentWithNoDetailsV8() throws IOException { + final XContent xContent = randomFrom(XContentType.values()).xContent(); + + final Exception failure = (Exception) randomExceptions().v1(); + BytesReference failureBytes = toShuffledXContent( + (builder, params) -> ElasticsearchException.generateFailureXContent(builder, params, failure, false), + xContent.type(), + RestApiVersion.V_8, + ToXContent.EMPTY_PARAMS, + randomBoolean() + ); try (XContentParser parser = createParser(xContent, failureBytes)) { failureBytes = BytesReference.bytes(shuffleXContent(parser, randomBoolean())); @@ -1165,10 +1301,12 @@ public void testFailureToAndFromXContentWithDetails() throws IOException { } Exception finalFailure = failure; - BytesReference failureBytes = toShuffledXContent((builder, params) -> { - ElasticsearchException.generateFailureXContent(builder, params, finalFailure, true); - return builder; - }, xContent.type(), ToXContent.EMPTY_PARAMS, randomBoolean()); + BytesReference failureBytes = toShuffledXContent( + (builder, params) -> ElasticsearchException.generateFailureXContent(builder, params, finalFailure, true), + xContent.type(), + ToXContent.EMPTY_PARAMS, + randomBoolean() + ); try (XContentParser parser = createParser(xContent, failureBytes)) { failureBytes = 
BytesReference.bytes(shuffleXContent(parser, randomBoolean())); @@ -1197,13 +1335,20 @@ private static void assertToXContentAsJson(ToXContent e, String expectedJson) th assertToXContentEquivalent(new BytesArray(expectedJson), actual, XContentType.JSON); } - private static void assertExceptionAsJson(Exception e, String expectedJson) throws IOException { + private static void assertThrowableAsJson(Throwable e, String expectedJson) throws IOException { assertToXContentAsJson((builder, params) -> { ElasticsearchException.generateThrowableXContent(builder, params, e); return builder; }, expectedJson); } + private static void assertFailureAsJson(Exception e, String expectedJson, boolean detailed) throws IOException { + assertToXContentAsJson( + (builder, params) -> ElasticsearchException.generateFailureXContent(builder, params, e, detailed), + expectedJson + ); + } + public static void assertDeepEquals(ElasticsearchException expected, ElasticsearchException actual) { do { if (expected == null) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java index 7a31f0dcb4631..a7058e5d6cd8c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -1057,6 +1057,8 @@ public static NodeStats createNodeStats() { randomLongBetween(0, maxStatValue), randomLongBetween(0, maxStatValue), randomLongBetween(0, maxStatValue), + randomLongBetween(0, maxStatValue), + randomLongBetween(0, maxStatValue), randomLongBetween(0, maxStatValue) ); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshotTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshotTests.java index e9188d9cb8f0d..a72630c327ea2 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshotTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshotTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.admin.cluster.stats.CCSTelemetrySnapshot.PerClusterCCSTelemetry; import org.elasticsearch.action.admin.cluster.stats.LongMetric.LongMetricValue; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Tuple; @@ -32,9 +33,13 @@ public class CCSTelemetrySnapshotTests extends AbstractWireSerializingTestCase { private LongMetricValue randomLongMetricValue() { + return randomLongMetricValueBetween(0, 1_000_000); + } + + private LongMetricValue randomLongMetricValueBetween(int low, int high) { LongMetric v = new LongMetric(); for (int i = 0; i < randomIntBetween(5, 10); i++) { - v.record(randomIntBetween(0, 1_000_000)); + v.record(randomIntBetween(low, high)); } return v.getValue(); } @@ -330,4 +335,21 @@ private String readJSONFromResource(String fileName) throws IOException { return new String(inputStream.readAllBytes(), StandardCharsets.UTF_8); } } + + public void testRanges() throws IOException { + var value1 = randomLongMetricValueBetween(1_000_000, 10_000_000); + var count1 = value1.count(); + var max1 = value1.max(); + var output = new BytesStreamOutput(); + value1.writeTo(output); + var value1Read = 
LongMetricValue.fromStream(output.bytes().streamInput()); + var value2 = randomLongMetricValueBetween(0, 100); + var count2 = value2.count(); + output = new BytesStreamOutput(); + value2.writeTo(output); + var value2Read = LongMetricValue.fromStream(output.bytes().streamInput()); + value2Read.add(value1Read); + assertThat(value2Read.count(), equalTo(count1 + count2)); + assertThat(value2Read.max(), equalTo(max1)); + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/MappingStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/MappingStatsTests.java index 2c374c7d26dee..96954458c18c4 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/MappingStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/MappingStatsTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.script.Script; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.test.AbstractWireSerializingTestCase; @@ -29,7 +30,15 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.elasticsearch.index.mapper.SourceFieldMapper.Mode.DISABLED; +import static org.elasticsearch.index.mapper.SourceFieldMapper.Mode.STORED; +import static org.elasticsearch.index.mapper.SourceFieldMapper.Mode.SYNTHETIC; +import static org.hamcrest.Matchers.equalTo; public class MappingStatsTests extends AbstractWireSerializingTestCase { @@ -203,7 +212,10 @@ public void testToXContent() { "doc_max" : 0, "doc_total" : 0 } - ] + ], + "source_modes" : { + "stored" : 2 + } } }""", Strings.toString(mappingStats, true, true)); } @@ -332,7 +344,10 @@ public void testToXContentWithSomeSharedMappings() { "doc_max" : 0, "doc_total" : 0 } - ] + ], + "source_modes" : { + "stored" : 3 + } } }""", Strings.toString(mappingStats, true, true)); } @@ -362,7 +377,24 @@ protected MappingStats createTestInstance() { if (randomBoolean()) { runtimeFieldStats.add(randomRuntimeFieldStats("long")); } - return new MappingStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), stats, runtimeFieldStats); + Map sourceModeUsageCount = randomBoolean() + ? 
Map.of() + : Map.of( + STORED.toString().toLowerCase(Locale.ENGLISH), + randomNonNegativeInt(), + SYNTHETIC.toString().toLowerCase(Locale.ENGLISH), + randomNonNegativeInt(), + DISABLED.toString().toLowerCase(Locale.ENGLISH), + randomNonNegativeInt() + ); + return new MappingStats( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + stats, + runtimeFieldStats, + sourceModeUsageCount + ); } private static FieldStats randomFieldStats(String type) { @@ -410,7 +442,8 @@ protected MappingStats mutateInstance(MappingStats instance) { long totalFieldCount = instance.getTotalFieldCount().getAsLong(); long totalDeduplicatedFieldCount = instance.getTotalDeduplicatedFieldCount().getAsLong(); long totalMappingSizeBytes = instance.getTotalMappingSizeBytes().getAsLong(); - switch (between(1, 5)) { + var sourceModeUsageCount = new HashMap<>(instance.getSourceModeUsageCount()); + switch (between(1, 6)) { case 1 -> { boolean remove = fieldTypes.size() > 0 && randomBoolean(); if (remove) { @@ -435,8 +468,22 @@ protected MappingStats mutateInstance(MappingStats instance) { case 3 -> totalFieldCount = randomValueOtherThan(totalFieldCount, ESTestCase::randomNonNegativeLong); case 4 -> totalDeduplicatedFieldCount = randomValueOtherThan(totalDeduplicatedFieldCount, ESTestCase::randomNonNegativeLong); case 5 -> totalMappingSizeBytes = randomValueOtherThan(totalMappingSizeBytes, ESTestCase::randomNonNegativeLong); + case 6 -> { + if (sourceModeUsageCount.isEmpty() == false) { + sourceModeUsageCount.remove(sourceModeUsageCount.keySet().stream().findFirst().get()); + } else { + sourceModeUsageCount.put("stored", randomNonNegativeInt()); + } + } } - return new MappingStats(totalFieldCount, totalDeduplicatedFieldCount, totalMappingSizeBytes, fieldTypes, runtimeFieldTypes); + return new MappingStats( + totalFieldCount, + totalDeduplicatedFieldCount, + totalMappingSizeBytes, + fieldTypes, + runtimeFieldTypes, + sourceModeUsageCount + ); } public void testDenseVectorType() { @@ -531,4 +578,39 @@ public void testWriteTo() throws IOException { assertEquals(instance.getFieldTypeStats(), deserialized.getFieldTypeStats()); assertEquals(instance.getRuntimeFieldStats(), deserialized.getRuntimeFieldStats()); } + + public void testSourceModes() { + var builder = Metadata.builder(); + int numStoredIndices = randomIntBetween(1, 5); + int numSyntheticIndices = randomIntBetween(1, 5); + int numDisabledIndices = randomIntBetween(1, 5); + for (int i = 0; i < numSyntheticIndices; i++) { + IndexMetadata.Builder indexMetadata = new IndexMetadata.Builder("foo-synthetic-" + i).settings( + indexSettings(IndexVersion.current(), 4, 1).put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "synthetic") + ); + builder.put(indexMetadata); + } + for (int i = 0; i < numStoredIndices; i++) { + IndexMetadata.Builder indexMetadata; + if (randomBoolean()) { + indexMetadata = new IndexMetadata.Builder("foo-stored-" + i).settings( + indexSettings(IndexVersion.current(), 4, 1).put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "stored") + ); + } else { + indexMetadata = new IndexMetadata.Builder("foo-stored-" + i).settings(indexSettings(IndexVersion.current(), 4, 1)); + } + builder.put(indexMetadata); + } + for (int i = 0; i < numDisabledIndices; i++) { + IndexMetadata.Builder indexMetadata = new IndexMetadata.Builder("foo-disabled-" + i).settings( + indexSettings(IndexVersion.current(), 4, 1).put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "disabled") + ); + 
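The testSourceModes setup around this point builds indices with explicit synthetic, stored, and disabled source modes, plus some with no explicit setting, and then asserts one count per mode: each index contributes to the bucket named by its index.mapping.source.mode setting, and an absent setting is counted under the default stored mode. A minimal sketch of that tallying, using plain maps rather than the real IndexMetadata and MappingStats types:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public final class SourceModeTallySketch {
    private static final String SOURCE_MODE_KEY = "index.mapping.source.mode";

    // Count indices per source mode; absent settings fall back to "stored".
    public static Map<String, Integer> tally(List<Map<String, String>> perIndexSettings) {
        Map<String, Integer> counts = new HashMap<>();
        for (Map<String, String> settings : perIndexSettings) {
            counts.merge(settings.getOrDefault(SOURCE_MODE_KEY, "stored"), 1, Integer::sum);
        }
        return counts;
    }

    public static void main(String[] args) {
        System.out.println(tally(List.of(
            Map.of(SOURCE_MODE_KEY, "synthetic"),
            Map.of(SOURCE_MODE_KEY, "disabled"),
            Map.of() // no explicit mode, counted as "stored"
        ))); // prints counts such as {synthetic=1, stored=1, disabled=1}
    }
}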
builder.put(indexMetadata); + } + var mappingStats = MappingStats.of(builder.build(), () -> {}); + assertThat(mappingStats.getSourceModeUsageCount().get("synthetic"), equalTo(numSyntheticIndices)); + assertThat(mappingStats.getSourceModeUsageCount().get("stored"), equalTo(numStoredIndices)); + assertThat(mappingStats.getSourceModeUsageCount().get("disabled"), equalTo(numDisabledIndices)); + } + } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java index ffdd588764699..023e7693f8a47 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java @@ -57,9 +57,15 @@ public void testToXContent() throws IOException { BytesReference expectedRequestBody = BytesReference.bytes(builder); - PutStoredScriptRequest request = new PutStoredScriptRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); - request.id("test1"); - request.content(expectedRequestBody, xContentType); + PutStoredScriptRequest request = new PutStoredScriptRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "test1", + null, + expectedRequestBody, + xContentType, + StoredScriptSource.parse(expectedRequestBody, xContentType) + ); XContentBuilder requestBuilder = XContentBuilder.builder(xContentType.xContent()); requestBuilder.startObject(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java index 0b9cba837583d..5cf7b438b41ab 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java @@ -42,6 +42,7 @@ import java.io.IOException; import java.io.Reader; +import java.util.Arrays; import java.util.List; import java.util.Map; @@ -250,6 +251,32 @@ public void testFillsAttributes() throws IOException { assertEquals("", tokens.get(3).getType()); } + public void testAnalyzerWithTwoTextsAndNoIndexName() throws IOException { + AnalyzeAction.Request request = new AnalyzeAction.Request(); + + for (String analyzer : Arrays.asList("standard", "simple", "stop", "keyword", "whitespace", "classic")) { + request.analyzer(analyzer); + request.text("a a", "b b"); + + AnalyzeAction.Response analyzeIndex = TransportAnalyzeAction.analyze(request, registry, mockIndexService(), maxTokenCount); + List tokensIndex = analyzeIndex.getTokens(); + + AnalyzeAction.Response analyzeNoIndex = TransportAnalyzeAction.analyze(request, registry, null, maxTokenCount); + List tokensNoIndex = analyzeNoIndex.getTokens(); + + assertEquals(tokensIndex.size(), tokensNoIndex.size()); + for (int i = 0; i < tokensIndex.size(); i++) { + AnalyzeAction.AnalyzeToken withIndex = tokensIndex.get(i); + AnalyzeAction.AnalyzeToken withNoIndex = tokensNoIndex.get(i); + + assertEquals(withIndex.getStartOffset(), withNoIndex.getStartOffset()); + assertEquals(withIndex.getEndOffset(), withNoIndex.getEndOffset()); + assertEquals(withIndex.getPosition(), withNoIndex.getPosition()); + assertEquals(withIndex.getType(), withNoIndex.getType()); + } + } + } + public void testWithIndexAnalyzers() throws IOException { AnalyzeAction.Request request = new 
AnalyzeAction.Request(); request.text("the quick brown fox"); diff --git a/server/src/test/java/org/elasticsearch/action/datastreams/ReindexDataStreamResponseTests.java b/server/src/test/java/org/elasticsearch/action/datastreams/ReindexDataStreamResponseTests.java new file mode 100644 index 0000000000000..fe839c28aab88 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/datastreams/ReindexDataStreamResponseTests.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.action.datastreams; + +import org.elasticsearch.action.datastreams.ReindexDataStreamAction.ReindexDataStreamResponse; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.Map; + +import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; +import static org.hamcrest.Matchers.equalTo; + +public class ReindexDataStreamResponseTests extends AbstractWireSerializingTestCase { + @Override + protected Writeable.Reader instanceReader() { + return ReindexDataStreamResponse::new; + } + + @Override + protected ReindexDataStreamResponse createTestInstance() { + return new ReindexDataStreamResponse(randomAlphaOfLength(40)); + } + + @Override + protected ReindexDataStreamResponse mutateInstance(ReindexDataStreamResponse instance) { + return createTestInstance(); + } + + public void testToXContent() throws IOException { + ReindexDataStreamResponse response = createTestInstance(); + try (XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent)) { + builder.humanReadable(true); + response.toXContent(builder, EMPTY_PARAMS); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + assertThat(parser.map(), equalTo(Map.of("task", response.getTaskId()))); + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java b/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java index 4e6b2b17b2554..61284a49b2502 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java @@ -79,7 +79,7 @@ public void testXContentDeserialization() throws IOException { assertEquals(actualPipelines.size(), parsedPipelines.size()); for (PipelineConfiguration pipeline : parsedPipelines) { assertTrue(pipelinesMap.containsKey(pipeline.getId())); - assertEquals(pipelinesMap.get(pipeline.getId()).getConfigAsMap(), pipeline.getConfigAsMap()); + assertEquals(pipelinesMap.get(pipeline.getId()).getConfig(), pipeline.getConfig()); } } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java 
b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java index 0bc5c69d8ad4b..dc1698e3459ec 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java @@ -134,7 +134,12 @@ public void setup() { ); fileSettingsService = spy( - new FileSettingsService(clusterService, mock(ReservedClusterStateService.class), newEnvironment(Settings.EMPTY)) + new FileSettingsService( + clusterService, + mock(ReservedClusterStateService.class), + newEnvironment(Settings.EMPTY), + new FileSettingsService.FileSettingsHealthIndicatorService() + ) ); } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java index 58ff9ec421889..983c2e7d65032 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java @@ -16,14 +16,14 @@ import org.elasticsearch.xcontent.XContentType; import java.io.IOException; -import java.nio.charset.StandardCharsets; +import static org.elasticsearch.ingest.IngestPipelineTestUtils.jsonSimulatePipelineRequest; import static org.hamcrest.CoreMatchers.equalTo; public class SimulatePipelineRequestTests extends ESTestCase { public void testSerialization() throws IOException { - SimulatePipelineRequest request = new SimulatePipelineRequest(new BytesArray(""), XContentType.JSON); + SimulatePipelineRequest request = jsonSimulatePipelineRequest(new BytesArray("")); // Sometimes we set an id if (randomBoolean()) { request.setId(randomAlphaOfLengthBetween(1, 10)); @@ -44,10 +44,7 @@ public void testSerialization() throws IOException { } public void testSerializationWithXContent() throws IOException { - SimulatePipelineRequest request = new SimulatePipelineRequest( - new BytesArray("{}".getBytes(StandardCharsets.UTF_8)), - XContentType.JSON - ); + SimulatePipelineRequest request = jsonSimulatePipelineRequest("{}"); assertEquals(XContentType.JSON, request.getXContentType()); BytesStreamOutput output = new BytesStreamOutput(); diff --git a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java index f8ecdbd062054..725a4583d104a 100644 --- a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java @@ -101,7 +101,7 @@ protected SearchPhase getNextPhase() { @Override protected void executePhaseOnShard( final SearchShardIterator shardIt, - final SearchShardTarget shard, + final Transport.Connection shard, final SearchActionListener listener ) {} diff --git a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java index 762a7e0f47cab..dda20dfb37e9d 100644 --- a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java @@ -8,35 +8,65 @@ */ package org.elasticsearch.action.search; +import org.apache.lucene.document.Document; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import 
org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TotalHits; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.store.MockDirectoryWrapper; +import org.apache.lucene.util.Accountable; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.mapper.IdLoader; +import org.elasticsearch.index.mapper.MapperMetrics; +import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.FetchSubPhase; +import org.elasticsearch.search.fetch.FetchSubPhaseProcessor; import org.elasticsearch.search.fetch.QueryFetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; +import org.elasticsearch.search.fetch.StoredFieldsSpec; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.ContextIndexSearcher; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchContextId; +import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.profile.ProfileResult; import org.elasticsearch.search.profile.SearchProfileQueryPhaseResult; import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.search.query.SearchTimeoutException; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalAggregationTestCase; +import org.elasticsearch.test.TestSearchContext; import org.elasticsearch.transport.Transport; +import java.io.IOException; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -749,4 +779,159 @@ private static void addProfiling(boolean profiled, QuerySearchResult queryResult private static ProfileResult fetchProfile(boolean profiled) { return profiled ? 
new ProfileResult("fetch", "fetch", Map.of(), Map.of(), FETCH_PROFILE_TIME, List.of()) : null; } + + public void testFetchTimeoutWithPartialResults() throws IOException { + Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir); + w.addDocument(new Document()); + w.addDocument(new Document()); + w.addDocument(new Document()); + IndexReader r = w.getReader(); + w.close(); + ContextIndexSearcher contextIndexSearcher = createSearcher(r); + try (SearchContext searchContext = createSearchContext(contextIndexSearcher, true)) { + FetchPhase fetchPhase = createFetchPhase(contextIndexSearcher); + fetchPhase.execute(searchContext, new int[] { 0, 1, 2 }, null); + assertTrue(searchContext.queryResult().searchTimedOut()); + assertEquals(1, searchContext.fetchResult().hits().getHits().length); + } finally { + r.close(); + dir.close(); + } + } + + public void testFetchTimeoutNoPartialResults() throws IOException { + Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir); + w.addDocument(new Document()); + w.addDocument(new Document()); + w.addDocument(new Document()); + IndexReader r = w.getReader(); + w.close(); + ContextIndexSearcher contextIndexSearcher = createSearcher(r); + + try (SearchContext searchContext = createSearchContext(contextIndexSearcher, false)) { + FetchPhase fetchPhase = createFetchPhase(contextIndexSearcher); + expectThrows(SearchTimeoutException.class, () -> fetchPhase.execute(searchContext, new int[] { 0, 1, 2 }, null)); + assertNull(searchContext.fetchResult().hits()); + } finally { + r.close(); + dir.close(); + } + } + + private static ContextIndexSearcher createSearcher(IndexReader reader) throws IOException { + return new ContextIndexSearcher(reader, null, null, new QueryCachingPolicy() { + @Override + public void onUse(Query query) {} + + @Override + public boolean shouldCache(Query query) { + return false; + } + }, randomBoolean()); + } + + private static FetchPhase createFetchPhase(ContextIndexSearcher contextIndexSearcher) { + return new FetchPhase(Collections.singletonList(fetchContext -> new FetchSubPhaseProcessor() { + boolean processCalledOnce = false; + + @Override + public void setNextReader(LeafReaderContext readerContext) {} + + @Override + public void process(FetchSubPhase.HitContext hitContext) { + // we throw only once one doc has been fetched, so we can test partial results are returned + if (processCalledOnce) { + contextIndexSearcher.throwTimeExceededException(); + } else { + processCalledOnce = true; + } + } + + @Override + public StoredFieldsSpec storedFieldsSpec() { + return StoredFieldsSpec.NO_REQUIREMENTS; + } + })); + } + + private static SearchContext createSearchContext(ContextIndexSearcher contextIndexSearcher, boolean allowPartialResults) { + IndexSettings indexSettings = new IndexSettings( + IndexMetadata.builder("index") + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())) + .numberOfShards(1) + .numberOfReplicas(0) + .creationDate(System.currentTimeMillis()) + .build(), + Settings.EMPTY + ); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { + @Override + public void onCache(ShardId shardId, Accountable accountable) { + + } + + @Override + public void onRemoval(ShardId shardId, Accountable accountable) { + + } + }); + + SearchExecutionContext searchExecutionContext = new SearchExecutionContext( + 0, + 0, + indexSettings, + bitsetFilterCache, + null, + null, + 
MappingLookup.EMPTY, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + Collections.emptyMap(), + null, + MapperMetrics.NOOP + ); + TestSearchContext searchContext = new TestSearchContext(searchExecutionContext, null, contextIndexSearcher) { + private final FetchSearchResult fetchSearchResult = new FetchSearchResult(); + private final ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + new SearchRequest().allowPartialSearchResults(allowPartialResults), + new ShardId("index", "indexUUID", 0), + 0, + 1, + AliasFilter.EMPTY, + 1f, + 0L, + null + ); + + @Override + public IdLoader newIdLoader() { + return new IdLoader.StoredIdLoader(); + } + + @Override + public FetchSearchResult fetchResult() { + return fetchSearchResult; + } + + @Override + public ShardSearchRequest request() { + return request; + } + }; + searchContext.addReleasable(searchContext.fetchResult()::decRef); + searchContext.setTask(new SearchShardTask(-1, "type", "action", "description", null, Collections.emptyMap())); + return searchContext; + } } diff --git a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java index 03c5d0a06f6fb..484b3c6b386fd 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java @@ -147,7 +147,7 @@ public void executeNextPhase(SearchPhase currentPhase, Supplier nex @Override protected void executePhaseOnShard( SearchShardIterator shardIt, - SearchShardTarget shard, + Transport.Connection shard, SearchActionListener listener ) { onShardResult(new SearchPhaseResult() { diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index f655136cd4ba4..b4ddd48172d01 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchPhaseResult; -import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.test.ESTestCase; @@ -119,16 +118,15 @@ public void testSkipSearchShards() throws InterruptedException { @Override protected void executePhaseOnShard( SearchShardIterator shardIt, - SearchShardTarget shard, + Transport.Connection connection, SearchActionListener listener ) { - seenShard.computeIfAbsent(shard.getShardId(), (i) -> { + seenShard.computeIfAbsent(shardIt.shardId(), (i) -> { numRequests.incrementAndGet(); // only count this once per replica return Boolean.TRUE; }); new Thread(() -> { - Transport.Connection connection = getConnection(null, shard.getNodeId()); TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult( new ShardSearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), connection.getNode() @@ -227,23 +225,22 @@ public void testLimitConcurrentShardRequests() throws InterruptedException { @Override protected void executePhaseOnShard( SearchShardIterator shardIt, - SearchShardTarget shard, + Transport.Connection connection, SearchActionListener listener ) { - 
seenShard.computeIfAbsent(shard.getShardId(), (i) -> { + seenShard.computeIfAbsent(shardIt.shardId(), (i) -> { numRequests.incrementAndGet(); // only count this once per shard copy return Boolean.TRUE; }); new Thread(() -> { safeAwait(awaitInitialRequests); - Transport.Connection connection = getConnection(null, shard.getNodeId()); TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult( new ShardSearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), connection.getNode() ); try { - if (shardFailures[shard.getShardId().id()]) { + if (shardFailures[shardIt.shardId().id()]) { listener.onFailure(new RuntimeException()); } else { listener.onResponse(testSearchPhaseResult); @@ -340,11 +337,11 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI @Override protected void executePhaseOnShard( SearchShardIterator shardIt, - SearchShardTarget shard, + Transport.Connection connection, SearchActionListener listener ) { - assertTrue("shard: " + shard.getShardId() + " has been queried twice", testResponse.queried.add(shard.getShardId())); - Transport.Connection connection = getConnection(null, shard.getNodeId()); + var shardId = shardIt.shardId(); + assertTrue("shard: " + shardId + " has been queried twice", testResponse.queried.add(shardId)); TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult( new ShardSearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), connection.getNode() @@ -464,13 +461,13 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI @Override protected void executePhaseOnShard( SearchShardIterator shardIt, - SearchShardTarget shard, + Transport.Connection connection, SearchActionListener listener ) { - assertTrue("shard: " + shard.getShardId() + " has been queried twice", response.queried.add(shard.getShardId())); - Transport.Connection connection = getConnection(null, shard.getNodeId()); + var shardId = shardIt.shardId(); + assertTrue("shard: " + shardId + " has been queried twice", response.queried.add(shardId)); final TestSearchPhaseResult testSearchPhaseResult; - if (shard.getShardId().id() == 0) { + if (shardId.id() == 0) { testSearchPhaseResult = new TestSearchPhaseResult(null, connection.getNode()); } else { testSearchPhaseResult = new TestSearchPhaseResult( @@ -573,15 +570,14 @@ public void testAllowPartialResults() throws InterruptedException { @Override protected void executePhaseOnShard( SearchShardIterator shardIt, - SearchShardTarget shard, + Transport.Connection connection, SearchActionListener listener ) { - seenShard.computeIfAbsent(shard.getShardId(), (i) -> { + seenShard.computeIfAbsent(shardIt.shardId(), (i) -> { numRequests.incrementAndGet(); // only count this once per shard copy return Boolean.TRUE; }); new Thread(() -> { - Transport.Connection connection = getConnection(null, shard.getNodeId()); TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult( new ShardSearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), connection.getNode() @@ -673,7 +669,7 @@ public void testSkipUnavailableSearchShards() throws InterruptedException { @Override protected void executePhaseOnShard( SearchShardIterator shardIt, - SearchShardTarget shard, + Transport.Connection connection, SearchActionListener listener ) { assert false : "Expected to skip all shards"; diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java 
b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 2afb5235ddf82..152e5a9e6eac6 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -16,12 +16,9 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; @@ -106,23 +103,6 @@ public void testSerialization() throws Exception { assertNotSame(deserializedRequest, searchRequest); } - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // this can be removed when the affected transport version constants are collapsed - public void testSerializationConstants() throws Exception { - SearchRequest searchRequest = createSearchRequest(); - - // something serialized with previous version to remove, should read correctly with the reversion - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setTransportVersion(TransportVersionUtils.getPreviousVersion(TransportVersions.REMOVE_MIN_COMPATIBLE_SHARD_NODE)); - searchRequest.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { - in.setTransportVersion(TransportVersions.REVERT_REMOVE_MIN_COMPATIBLE_SHARD_NODE); - SearchRequest copiedRequest = new SearchRequest(in); - assertEquals(copiedRequest, searchRequest); - assertEquals(copiedRequest.hashCode(), searchRequest.hashCode()); - } - } - } - public void testSerializationMultiKNN() throws Exception { SearchRequest searchRequest = createSearchRequest(); if (searchRequest.source() == null) { diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java index 51796f404c283..d54ac9c66d9a5 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java @@ -639,7 +639,6 @@ public void testMergeAggs() throws InterruptedException { 10000D, count, InternalAggregations.EMPTY, - false, DocValueFormat.RAW ); InternalDateRange range = factory.create(rangeAggName, singletonList(bucket), DocValueFormat.RAW, false, emptyMap()); @@ -1498,15 +1497,7 @@ private SearchHits createSimpleDeterministicSearchHits(String clusterAlias, Inde private static InternalAggregations createDeterminsticAggregation(String maxAggName, String rangeAggName, double value, int count) { Max max = new Max(maxAggName, value, DocValueFormat.RAW, Collections.emptyMap()); InternalDateRange.Factory factory = new InternalDateRange.Factory(); - InternalDateRange.Bucket bucket = factory.createBucket( - "bucket", - 0D, - 10000D, - count, - InternalAggregations.EMPTY, - false, - DocValueFormat.RAW - ); + InternalDateRange.Bucket bucket = factory.createBucket("bucket", 0D, 10000D, count, InternalAggregations.EMPTY, DocValueFormat.RAW); InternalDateRange range = 
factory.create(rangeAggName, singletonList(bucket), DocValueFormat.RAW, false, emptyMap()); InternalAggregations aggs = InternalAggregations.from(Arrays.asList(range, max)); diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index ed3d26141fe04..a94427cd0df6f 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -1758,6 +1758,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { new NoneCircuitBreakerService(), transportService, searchService, + null, new SearchTransportService(transportService, client, null), null, clusterService, diff --git a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymRuleActionTests.java b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymRuleActionTests.java index a1b9c59571496..303b75098ab67 100644 --- a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymRuleActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymRuleActionTests.java @@ -26,7 +26,7 @@ public void testEmptyRequestBody() throws Exception { .withParams(Map.of("synonymsSet", "testSet", "synonymRuleId", "testRule")) .build(); - FakeRestChannel channel = new FakeRestChannel(request, true, 0); + FakeRestChannel channel = new FakeRestChannel(request, randomBoolean(), 0); try (var threadPool = createThreadPool()) { final var nodeClient = new NoOpNodeClient(threadPool); expectThrows(IllegalArgumentException.class, () -> action.handleRequest(request, channel, nodeClient)); diff --git a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionTests.java b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionTests.java index 4dce73fcf0e89..915c338195c86 100644 --- a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionTests.java @@ -26,7 +26,7 @@ public void testEmptyRequestBody() throws Exception { .withParams(Map.of("synonymsSet", "test")) .build(); - FakeRestChannel channel = new FakeRestChannel(request, true, 0); + FakeRestChannel channel = new FakeRestChannel(request, randomBoolean(), 0); try (var threadPool = createThreadPool()) { final var nodeClient = new NoOpNodeClient(threadPool); expectThrows(IllegalArgumentException.class, () -> action.handleRequest(request, channel, nodeClient)); diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index 9613086aa9f57..668aea70c23f2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -167,7 +167,8 @@ public void testToXContent() throws IOException { "read", "write", "metadata_read", - "metadata_write" + "metadata_write", + "refresh" ] } }, @@ -180,7 +181,8 @@ public void testToXContent() throws IOException { "read", "write", "metadata_read", - "metadata_write" + "metadata_write", + "refresh" ] } } @@ -440,7 +442,8 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti "read", "write", "metadata_read", - "metadata_write" + "metadata_write", + "refresh" ] } }, @@ -453,7 +456,8 @@ public void 
testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti "read", "write", "metadata_read", - "metadata_write" + "metadata_write", + "refresh" ] } } @@ -712,7 +716,8 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti "read", "write", "metadata_read", - "metadata_write" + "metadata_write", + "refresh" ] } }, @@ -725,7 +730,8 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti "read", "write", "metadata_read", - "metadata_write" + "metadata_write", + "refresh" ] } } diff --git a/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java b/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java index 311f2ec36af5c..0237fff8fdda5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java @@ -10,19 +10,22 @@ package org.elasticsearch.cluster.block; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; -import java.util.Arrays; import java.util.Collections; -import java.util.List; +import java.util.EnumSet; import java.util.Map; import static java.util.EnumSet.copyOf; +import static org.elasticsearch.test.TransportVersionUtils.getFirstVersion; +import static org.elasticsearch.test.TransportVersionUtils.getPreviousVersion; import static org.elasticsearch.test.TransportVersionUtils.randomVersion; +import static org.elasticsearch.test.TransportVersionUtils.randomVersionBetween; import static org.hamcrest.CoreMatchers.endsWith; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.not; @@ -36,7 +39,7 @@ public void testSerialization() throws Exception { int iterations = randomIntBetween(5, 20); for (int i = 0; i < iterations; i++) { TransportVersion version = randomVersion(random()); - ClusterBlock clusterBlock = randomClusterBlock(); + ClusterBlock clusterBlock = randomClusterBlock(version); BytesStreamOutput out = new BytesStreamOutput(); out.setTransportVersion(version); @@ -50,13 +53,41 @@ public void testSerialization() throws Exception { } } + public void testSerializationBwc() throws Exception { + var out = new BytesStreamOutput(); + out.setTransportVersion( + randomVersionBetween(random(), getFirstVersion(), getPreviousVersion(TransportVersions.NEW_REFRESH_CLUSTER_BLOCK)) + ); + + var clusterBlock = randomClusterBlock(TransportVersions.NEW_REFRESH_CLUSTER_BLOCK); + clusterBlock.writeTo(out); + + var in = out.bytes().streamInput(); + in.setTransportVersion(randomVersion()); + + assertClusterBlockEquals( + new ClusterBlock( + clusterBlock.id(), + clusterBlock.uuid(), + clusterBlock.description(), + clusterBlock.retryable(), + clusterBlock.disableStatePersistence(), + clusterBlock.isAllowReleaseResources(), + clusterBlock.status(), + // ClusterBlockLevel.REFRESH should not be sent over the wire to nodes with version < NEW_REFRESH_CLUSTER_BLOCK + ClusterBlock.filterLevels(clusterBlock.levels(), level -> ClusterBlockLevel.REFRESH.equals(level) == false) + ), + new ClusterBlock(in) + ); + } + public void testToStringDanglingComma() { - final ClusterBlock clusterBlock = randomClusterBlock(); + final ClusterBlock clusterBlock = randomClusterBlock(randomVersion(random())); 
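The testSerializationBwc hunk above reduces to one pattern: ClusterBlockLevel.REFRESH is stripped from the serialized levels whenever the destination transport version predates NEW_REFRESH_CLUSTER_BLOCK. A minimal, self-contained sketch of that version-gated EnumSet filtering follows; the Level enum, the NEW_REFRESH_VERSION constant, and the helper names are illustrative stand-ins, not the real ClusterBlock/ClusterBlockLevel API.

import java.util.EnumSet;
import java.util.function.Predicate;

class RefreshLevelBwcSketch {
    // Stand-in for ClusterBlockLevel; only the REFRESH case matters here.
    enum Level { READ, WRITE, METADATA_READ, METADATA_WRITE, REFRESH }

    // Hypothetical wire-version id standing in for TransportVersions.NEW_REFRESH_CLUSTER_BLOCK.
    static final int NEW_REFRESH_VERSION = 1_000;

    // Mirrors the shape of ClusterBlock.filterLevels(levels, predicate): keep only matching levels.
    static EnumSet<Level> filterLevels(EnumSet<Level> levels, Predicate<Level> predicate) {
        EnumSet<Level> kept = EnumSet.noneOf(Level.class);
        for (Level level : levels) {
            if (predicate.test(level)) {
                kept.add(level);
            }
        }
        return kept;
    }

    // REFRESH is kept only when the peer is new enough to understand it; other levels pass through.
    static EnumSet<Level> levelsForWire(EnumSet<Level> levels, int peerVersion) {
        return filterLevels(levels, level -> level != Level.REFRESH || peerVersion >= NEW_REFRESH_VERSION);
    }

    public static void main(String[] args) {
        EnumSet<Level> all = EnumSet.allOf(Level.class);
        System.out.println(levelsForWire(all, NEW_REFRESH_VERSION - 1)); // REFRESH stripped for an old peer
        System.out.println(levelsForWire(all, NEW_REFRESH_VERSION));     // REFRESH kept for a new peer
    }
}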
assertThat(clusterBlock.toString(), not(endsWith(","))); } public void testGlobalBlocksCheckedIfNoIndicesSpecified() { - ClusterBlock globalBlock = randomClusterBlock(); + ClusterBlock globalBlock = randomClusterBlock(randomVersion(random())); ClusterBlocks clusterBlocks = new ClusterBlocks(Collections.singleton(globalBlock), Map.of()); ClusterBlockException exception = clusterBlocks.indicesBlockedException(randomFrom(globalBlock.levels()), new String[0]); assertNotNull(exception); @@ -113,9 +144,13 @@ public void testGetIndexBlockWithId() { assertThat(builder.build().getIndexBlockWithId("index", randomValueOtherThan(blockId, ESTestCase::randomInt)), nullValue()); } - private static ClusterBlock randomClusterBlock() { + private static ClusterBlock randomClusterBlock(TransportVersion version) { final String uuid = randomBoolean() ? UUIDs.randomBase64UUID() : null; - final List levels = Arrays.asList(ClusterBlockLevel.values()); + final EnumSet levels = ClusterBlock.filterLevels( + EnumSet.allOf(ClusterBlockLevel.class), + // Filter out ClusterBlockLevel.REFRESH for versions < TransportVersions.NEW_REFRESH_CLUSTER_BLOCK + level -> ClusterBlockLevel.REFRESH.equals(level) == false || version.onOrAfter(TransportVersions.NEW_REFRESH_CLUSTER_BLOCK) + ); return new ClusterBlock( randomInt(), uuid, diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinValidationServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinValidationServiceTests.java index 4518bd655346a..226f5dbf3b2ff 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinValidationServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinValidationServiceTests.java @@ -22,10 +22,14 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.component.Lifecycle; +import org.elasticsearch.common.compress.CompressorFactory; +import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistryTests; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -38,6 +42,7 @@ import org.elasticsearch.test.transport.MockTransport; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.BytesTransportRequest; import org.elasticsearch.transport.CloseableConnection; import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.TestTransportChannel; @@ -49,6 +54,7 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.ToXContent; +import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; @@ -155,6 +161,7 @@ public void doRun() { final var joinValidationService = new JoinValidationService( settings, transportService, + writableRegistry(), () -> usually() ? 
clusterState : null, clusterState::metadata, List.of() @@ -286,7 +293,14 @@ public void writeTo(StreamOutput out) {} ); // registers request handler - new JoinValidationService(Settings.EMPTY, joiningNodeTransportService, () -> clusterState, clusterState::metadata, List.of()); + new JoinValidationService( + Settings.EMPTY, + joiningNodeTransportService, + writableRegistry(), + () -> clusterState, + clusterState::metadata, + List.of() + ); joiningNodeTransportService.start(); joiningNodeTransportService.acceptIncomingRequests(); @@ -325,6 +339,7 @@ protected void onSendRequest(long requestId, String action, TransportRequest req final var joinValidationService = new JoinValidationService( Settings.EMPTY, masterTransportService, + writableRegistry(), () -> clusterState, clusterState::metadata, List.of() @@ -349,7 +364,7 @@ protected void onSendRequest(long requestId, String action, TransportRequest req } } - public void testJoinValidationRejectsMismatchedClusterUUID() { + public void testJoinValidationRejectsMismatchedClusterUUID() throws IOException { final var deterministicTaskQueue = new DeterministicTaskQueue(); final var mockTransport = new MockTransport(); final var localNode = DiscoveryNodeUtils.create("node0"); @@ -371,7 +386,14 @@ public void testJoinValidationRejectsMismatchedClusterUUID() { final var settings = Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), dataPath).build(); // registers request handler - new JoinValidationService(settings, transportService, () -> localClusterState, localClusterState::metadata, List.of()); + new JoinValidationService( + settings, + transportService, + writableRegistry(), + () -> localClusterState, + localClusterState::metadata, + List.of() + ); transportService.start(); transportService.acceptIncomingRequests(); @@ -384,7 +406,7 @@ public void testJoinValidationRejectsMismatchedClusterUUID() { transportService.sendRequest( localNode, JoinValidationService.JOIN_VALIDATE_ACTION_NAME, - new ValidateJoinRequest(otherClusterState), + serializeClusterState(otherClusterState), new ActionListenerResponseHandler<>(future, in -> TransportResponse.Empty.INSTANCE, TransportResponseHandler.TRANSPORT_WORKER) ); deterministicTaskQueue.runAllTasks(); @@ -401,6 +423,22 @@ public void testJoinValidationRejectsMismatchedClusterUUID() { ); } + private static BytesTransportRequest serializeClusterState(ClusterState clusterState) { + try ( + var bytesStream = new BytesStreamOutput(); + var compressedStream = new OutputStreamStreamOutput( + CompressorFactory.COMPRESSOR.threadLocalOutputStream(Streams.flushOnCloseStream(bytesStream)) + ) + ) { + compressedStream.setTransportVersion(TransportVersion.current()); + clusterState.writeTo(compressedStream); + compressedStream.flush(); + return new BytesTransportRequest(ReleasableBytesReference.wrap(bytesStream.bytes()), TransportVersion.current()); + } catch (Exception e) { + throw new AssertionError(e); + } + } + public void testJoinValidationRunsJoinValidators() { final var deterministicTaskQueue = new DeterministicTaskQueue(); final var mockTransport = new MockTransport(); @@ -420,11 +458,12 @@ public void testJoinValidationRunsJoinValidators() { new JoinValidationService( Settings.EMPTY, transportService, + writableRegistry(), () -> localClusterState, localClusterState::metadata, List.of((node, state) -> { assertSame(node, localNode); - assertSame(state, stateForValidation); + assertEquals(state.stateUUID(), stateForValidation.stateUUID()); throw new IllegalStateException("simulated validation failure"); }) 
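The serializeClusterState helper above replaces the typed ValidateJoinRequest with a pre-serialized payload: write through a compressing stream, flush, then wrap the raw bytes in a transport request. Below is a plain-JDK sketch of that write-compressed/read-compressed round trip, using Deflater/Inflater streams and hypothetical helper names rather than the Elasticsearch CompressorFactory and BytesTransportRequest APIs.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;

class CompressedPayloadSketch {
    // Sender side: stream the payload through a compressor, analogous to
    // clusterState.writeTo(compressedStream) in the helper above.
    static byte[] compress(byte[] payload) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DeflaterOutputStream out = new DeflaterOutputStream(bytes)) {
            out.write(payload);
        } // close() finishes the deflate stream, playing the role of the explicit flush()
        return bytes.toByteArray();
    }

    // Receiver side: inflate the raw bytes back before deserializing.
    static byte[] decompress(byte[] compressed) throws IOException {
        try (InflaterInputStream in = new InflaterInputStream(new ByteArrayInputStream(compressed))) {
            return in.readAllBytes();
        }
    }

    public static void main(String[] args) throws IOException {
        byte[] original = "pretend-cluster-state".getBytes(StandardCharsets.UTF_8);
        byte[] onTheWire = compress(original);
        System.out.println(new String(decompress(onTheWire), StandardCharsets.UTF_8));
    }
}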
); // registers request handler @@ -435,7 +474,7 @@ public void testJoinValidationRunsJoinValidators() { transportService.sendRequest( localNode, JoinValidationService.JOIN_VALIDATE_ACTION_NAME, - new ValidateJoinRequest(stateForValidation), + serializeClusterState(stateForValidation), new ActionListenerResponseHandler<>(future, in -> TransportResponse.Empty.INSTANCE, TransportResponseHandler.TRANSPORT_WORKER) ); deterministicTaskQueue.runAllTasks(); @@ -467,9 +506,16 @@ protected void onSendRequest(long requestId, String action, TransportRequest req null, Collections.emptySet() ); - final var joinValidationService = new JoinValidationService(Settings.EMPTY, masterTransportService, () -> null, () -> { - throw new AssertionError("should not be called"); - }, List.of()); + final var joinValidationService = new JoinValidationService( + Settings.EMPTY, + masterTransportService, + writableRegistry(), + () -> null, + () -> { + throw new AssertionError("should not be called"); + }, + List.of() + ); masterTransportService.start(); masterTransportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/elasticsearch/cluster/features/NodeFeaturesFixupListenerTests.java b/server/src/test/java/org/elasticsearch/cluster/features/NodeFeaturesFixupListenerTests.java deleted file mode 100644 index 00cfac7248da6..0000000000000 --- a/server/src/test/java/org/elasticsearch/cluster/features/NodeFeaturesFixupListenerTests.java +++ /dev/null @@ -1,246 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ - -package org.elasticsearch.cluster.features; - -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.node.features.NodeFeatures; -import org.elasticsearch.action.admin.cluster.node.features.NodesFeaturesRequest; -import org.elasticsearch.action.admin.cluster.node.features.NodesFeaturesResponse; -import org.elasticsearch.action.admin.cluster.node.features.TransportNodesFeaturesAction; -import org.elasticsearch.client.internal.ClusterAdminClient; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.features.NodeFeaturesFixupListener.NodesFeaturesTask; -import org.elasticsearch.cluster.features.NodeFeaturesFixupListener.NodesFeaturesUpdater; -import org.elasticsearch.cluster.node.DiscoveryNodeUtils; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.node.VersionInformation; -import org.elasticsearch.cluster.service.ClusterStateTaskExecutorUtils; -import org.elasticsearch.cluster.service.MasterServiceTaskQueue; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.Scheduler; -import org.mockito.ArgumentCaptor; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.Executor; - -import static org.elasticsearch.test.LambdaMatchers.transformedMatch; -import static org.hamcrest.Matchers.arrayContainingInAnyOrder; -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.same; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.hamcrest.MockitoHamcrest.argThat; - -public class NodeFeaturesFixupListenerTests extends ESTestCase { - - @SuppressWarnings("unchecked") - private static MasterServiceTaskQueue newMockTaskQueue() { - return mock(MasterServiceTaskQueue.class); - } - - private static DiscoveryNodes nodes(Version... versions) { - var builder = DiscoveryNodes.builder(); - for (int i = 0; i < versions.length; i++) { - builder.add(DiscoveryNodeUtils.create("node" + i, new TransportAddress(TransportAddress.META_ADDRESS, 9200 + i), versions[i])); - } - builder.localNodeId("node0").masterNodeId("node0"); - return builder.build(); - } - - private static DiscoveryNodes nodes(VersionInformation... versions) { - var builder = DiscoveryNodes.builder(); - for (int i = 0; i < versions.length; i++) { - builder.add( - DiscoveryNodeUtils.builder("node" + i) - .address(new TransportAddress(TransportAddress.META_ADDRESS, 9200 + i)) - .version(versions[i]) - .build() - ); - } - builder.localNodeId("node0").masterNodeId("node0"); - return builder.build(); - } - - @SafeVarargs - private static Map> features(Set... 
nodeFeatures) { - Map> features = new HashMap<>(); - for (int i = 0; i < nodeFeatures.length; i++) { - features.put("node" + i, nodeFeatures[i]); - } - return features; - } - - private static NodesFeaturesResponse getResponse(Map> responseData) { - return new NodesFeaturesResponse( - ClusterName.DEFAULT, - responseData.entrySet() - .stream() - .map( - e -> new NodeFeatures( - e.getValue(), - DiscoveryNodeUtils.create(e.getKey(), new TransportAddress(TransportAddress.META_ADDRESS, 9200)) - ) - ) - .toList(), - List.of() - ); - } - - public void testNothingDoneWhenNothingToFix() { - MasterServiceTaskQueue taskQueue = newMockTaskQueue(); - ClusterAdminClient client = mock(ClusterAdminClient.class); - - ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) - .nodes(nodes(Version.CURRENT, Version.CURRENT)) - .nodeFeatures(features(Set.of("f1", "f2"), Set.of("f1", "f2"))) - .build(); - - NodeFeaturesFixupListener listener = new NodeFeaturesFixupListener(taskQueue, client, null, null); - listener.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); - - verify(taskQueue, never()).submitTask(anyString(), any(), any()); - } - - public void testFeaturesFixedAfterNewMaster() throws Exception { - MasterServiceTaskQueue taskQueue = newMockTaskQueue(); - ClusterAdminClient client = mock(ClusterAdminClient.class); - Set features = Set.of("f1", "f2"); - - ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) - .nodes(nodes(Version.CURRENT, Version.CURRENT, Version.CURRENT)) - .nodeFeatures(features(features, Set.of(), Set.of())) - .build(); - - ArgumentCaptor> action = ArgumentCaptor.captor(); - ArgumentCaptor task = ArgumentCaptor.captor(); - - NodeFeaturesFixupListener listener = new NodeFeaturesFixupListener(taskQueue, client, null, null); - listener.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); - verify(client).execute( - eq(TransportNodesFeaturesAction.TYPE), - argThat(transformedMatch(NodesFeaturesRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), - action.capture() - ); - - action.getValue().onResponse(getResponse(Map.of("node1", features, "node2", features))); - verify(taskQueue).submitTask(anyString(), task.capture(), any()); - - ClusterState newState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful( - testState, - new NodesFeaturesUpdater(), - List.of(task.getValue()) - ); - - assertThat(newState.clusterFeatures().allNodeFeatures(), containsInAnyOrder("f1", "f2")); - } - - public void testFeaturesFetchedOnlyForUpdatedNodes() { - MasterServiceTaskQueue taskQueue = newMockTaskQueue(); - ClusterAdminClient client = mock(ClusterAdminClient.class); - - ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) - .nodes( - nodes( - VersionInformation.CURRENT, - VersionInformation.CURRENT, - new VersionInformation(Version.V_8_12_0, IndexVersion.current(), IndexVersion.current()) - ) - ) - .nodeFeatures(features(Set.of("f1", "f2"), Set.of(), Set.of())) - .build(); - - ArgumentCaptor> action = ArgumentCaptor.captor(); - - NodeFeaturesFixupListener listener = new NodeFeaturesFixupListener(taskQueue, client, null, null); - listener.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); - verify(client).execute( - eq(TransportNodesFeaturesAction.TYPE), - argThat(transformedMatch(NodesFeaturesRequest::nodesIds, arrayContainingInAnyOrder("node1"))), - action.capture() - ); - } - - public void testConcurrentChangesDoNotOverlap() { - 
MasterServiceTaskQueue taskQueue = newMockTaskQueue(); - ClusterAdminClient client = mock(ClusterAdminClient.class); - Set features = Set.of("f1", "f2"); - - ClusterState testState1 = ClusterState.builder(ClusterState.EMPTY_STATE) - .nodes(nodes(Version.CURRENT, Version.CURRENT, Version.CURRENT)) - .nodeFeatures(features(features, Set.of(), Set.of())) - .build(); - - NodeFeaturesFixupListener listeners = new NodeFeaturesFixupListener(taskQueue, client, null, null); - listeners.clusterChanged(new ClusterChangedEvent("test", testState1, ClusterState.EMPTY_STATE)); - verify(client).execute( - eq(TransportNodesFeaturesAction.TYPE), - argThat(transformedMatch(NodesFeaturesRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), - any() - ); - // don't send back the response yet - - ClusterState testState2 = ClusterState.builder(ClusterState.EMPTY_STATE) - .nodes(nodes(Version.CURRENT, Version.CURRENT, Version.CURRENT)) - .nodeFeatures(features(features, features, Set.of())) - .build(); - // should not send any requests - listeners.clusterChanged(new ClusterChangedEvent("test", testState2, testState1)); - verifyNoMoreInteractions(client); - } - - public void testFailedRequestsAreRetried() { - MasterServiceTaskQueue taskQueue = newMockTaskQueue(); - ClusterAdminClient client = mock(ClusterAdminClient.class); - Scheduler scheduler = mock(Scheduler.class); - Executor executor = mock(Executor.class); - Set features = Set.of("f1", "f2"); - - ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) - .nodes(nodes(Version.CURRENT, Version.CURRENT, Version.CURRENT)) - .nodeFeatures(features(features, Set.of(), Set.of())) - .build(); - - ArgumentCaptor> action = ArgumentCaptor.captor(); - ArgumentCaptor retry = ArgumentCaptor.forClass(Runnable.class); - - NodeFeaturesFixupListener listener = new NodeFeaturesFixupListener(taskQueue, client, scheduler, executor); - listener.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); - verify(client).execute( - eq(TransportNodesFeaturesAction.TYPE), - argThat(transformedMatch(NodesFeaturesRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), - action.capture() - ); - - action.getValue().onFailure(new RuntimeException("failure")); - verify(scheduler).schedule(retry.capture(), any(), same(executor)); - - // running the retry should cause another call - retry.getValue().run(); - verify(client, times(2)).execute( - eq(TransportNodesFeaturesAction.TYPE), - argThat(transformedMatch(NodesFeaturesRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), - action.capture() - ); - } -} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java index 6be5b48f9d723..57c360dc6a92a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java @@ -10,163 +10,90 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.Context; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.DateMathExpressionResolver; -import 
org.elasticsearch.indices.SystemIndices.SystemIndexAccessLevel; import org.elasticsearch.test.ESTestCase; -import org.hamcrest.Matchers; import java.time.Instant; import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; import java.util.Locale; +import java.util.function.LongSupplier; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class DateMathExpressionResolverTests extends ESTestCase { - private final Context context = new Context( - ClusterState.builder(new ClusterName("_name")).build(), - IndicesOptions.strictExpand(), - SystemIndexAccessLevel.NONE - ); + private final long now = randomMillisUpToYear9999(); + private final LongSupplier getTime = () -> now; - private static ZonedDateTime dateFromMillis(long millis) { - return ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneOffset.UTC); - } + public void testNoDateMathExpression() { + String expression = randomAlphaOfLength(10); + assertThat(DateMathExpressionResolver.resolveExpression(expression, getTime), equalTo(expression)); - private static String formatDate(String pattern, ZonedDateTime zonedDateTime) { - DateTimeFormatter dateFormatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT); - return dateFormatter.format(zonedDateTime); + expression = "*"; + assertThat(DateMathExpressionResolver.resolveExpression(expression, getTime), equalTo(expression)); } - public void testNormal() throws Exception { - int numIndexExpressions = randomIntBetween(1, 9); - List indexExpressions = new ArrayList<>(numIndexExpressions); - for (int i = 0; i < numIndexExpressions; i++) { - indexExpressions.add(randomAlphaOfLength(10)); - } - List result = DateMathExpressionResolver.resolve(context, indexExpressions); - assertThat(result.size(), equalTo(indexExpressions.size())); - for (int i = 0; i < indexExpressions.size(); i++) { - assertThat(result.get(i), equalTo(indexExpressions.get(i))); - } - } + public void testExpression() { + String result = DateMathExpressionResolver.resolveExpression("<.marvel-{now}>", getTime); + assertThat(result, equalTo(".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(now)))); - public void testExpression() throws Exception { - List indexExpressions = Arrays.asList("<.marvel-{now}>", "<.watch_history-{now}>", ""); - List result = DateMathExpressionResolver.resolve(context, indexExpressions); - assertThat(result.size(), equalTo(3)); - assertThat(result.get(0), equalTo(".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); - assertThat(result.get(1), equalTo(".watch_history-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); - assertThat(result.get(2), equalTo("logstash-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); + result = DateMathExpressionResolver.resolveExpression("<.watch_history-{now}>", getTime); + assertThat(result, equalTo(".watch_history-" + formatDate("uuuu.MM.dd", dateFromMillis(now)))); + + result = DateMathExpressionResolver.resolveExpression("", getTime); + assertThat(result, equalTo("logstash-" + formatDate("uuuu.MM.dd", dateFromMillis(now)))); } public void testExpressionWithWildcardAndExclusions() { - List indexExpressions = Arrays.asList( - "<-before-inner-{now}>", - "-", - "", - "<-after-inner-{now}>", - "-" - ); - List result = DateMathExpressionResolver.resolve(context, 
indexExpressions); - assertThat( - result, - Matchers.contains( - equalTo("-before-inner-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime()))), - equalTo("-"), // doesn't evaluate because it doesn't start with "<" and it is not an exclusion - equalTo("wild*card-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())) + "*"), - equalTo("-after-inner-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime()))), - equalTo("-after-outer-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime()))) - ) - ); - Context noWildcardExpandContext = new Context( - ClusterState.builder(new ClusterName("_name")).build(), - IndicesOptions.strictSingleIndexNoExpandForbidClosed(), - SystemIndexAccessLevel.NONE - ); - result = DateMathExpressionResolver.resolve(noWildcardExpandContext, indexExpressions); - assertThat( - result, - Matchers.contains( - equalTo("-before-inner-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime()))), - // doesn't evaluate because it doesn't start with "<" and there can't be exclusions without wildcard expansion - equalTo("-"), - equalTo("wild*card-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())) + "*"), - equalTo("-after-inner-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime()))), - // doesn't evaluate because it doesn't start with "<" and there can't be exclusions without wildcard expansion - equalTo("-") - ) - ); - } + String result = DateMathExpressionResolver.resolveExpression("<-before-inner-{now}>", getTime); + assertThat(result, equalTo("-before-inner-" + formatDate("uuuu.MM.dd", dateFromMillis(now)))); + + result = DateMathExpressionResolver.resolveExpression("", getTime); + assertThat(result, equalTo("wild*card-" + formatDate("uuuu.MM.dd", dateFromMillis(now)) + "*")); + + result = DateMathExpressionResolver.resolveExpression("<-after-inner-{now}>", getTime); + assertThat(result, equalTo("-after-inner-" + formatDate("uuuu.MM.dd", dateFromMillis(now)))); - public void testEmpty() throws Exception { - List result = DateMathExpressionResolver.resolve(context, Collections.emptyList()); - assertThat(result.size(), equalTo(0)); } - public void testExpression_Static() throws Exception { - List result = DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-test>")); - assertThat(result.size(), equalTo(1)); - assertThat(result.get(0), equalTo(".marvel-test")); + public void testExpression_Static() { + String result = DateMathExpressionResolver.resolveExpression("<.marvel-test>", getTime); + assertThat(result, equalTo(".marvel-test")); } - public void testExpression_MultiParts() throws Exception { - List result = DateMathExpressionResolver.resolve(context, Arrays.asList("<.text1-{now/d}-text2-{now/M}>")); - assertThat(result.size(), equalTo(1)); + public void testExpression_MultiParts() { + String result = DateMathExpressionResolver.resolveExpression("<.text1-{now/d}-text2-{now/M}>", getTime); assertThat( - result.get(0), + result, equalTo( ".text1-" - + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())) + + formatDate("uuuu.MM.dd", dateFromMillis(now)) + "-text2-" - + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime()).withDayOfMonth(1)) + + formatDate("uuuu.MM.dd", dateFromMillis(now).withDayOfMonth(1)) ) ); } - public void testExpression_CustomFormat() throws Exception { - List results = DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{yyyy.MM.dd}}>")); - assertThat(results.size(), equalTo(1)); - 
assertThat(results.get(0), equalTo(".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); - } - - public void testExpression_EscapeStatic() throws Exception { - List result = DateMathExpressionResolver.resolve(context, Arrays.asList("<.mar\\{v\\}el-{now/d}>")); - assertThat(result.size(), equalTo(1)); - assertThat(result.get(0), equalTo(".mar{v}el-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); + public void testExpression_CustomFormat() { + String result = DateMathExpressionResolver.resolveExpression("<.marvel-{now/d{yyyy.MM.dd}}>", getTime); + assertThat(result, equalTo(".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(now)))); } - public void testExpression_EscapeDateFormat() throws Exception { - List result = DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{'\\{year\\}'yyyy}}>")); - assertThat(result.size(), equalTo(1)); - assertThat(result.get(0), equalTo(".marvel-" + formatDate("'{year}'yyyy", dateFromMillis(context.getStartTime())))); + public void testExpression_EscapeStatic() { + String result = DateMathExpressionResolver.resolveExpression("<.mar\\{v\\}el-{now/d}>", getTime); + assertThat(result, equalTo(".mar{v}el-" + formatDate("uuuu.MM.dd", dateFromMillis(now)))); } - public void testExpression_MixedArray() throws Exception { - List result = DateMathExpressionResolver.resolve( - context, - Arrays.asList("name1", "<.marvel-{now/d}>", "name2", "<.logstash-{now/M{uuuu.MM}}>") - ); - assertThat(result.size(), equalTo(4)); - assertThat(result.get(0), equalTo("name1")); - assertThat(result.get(1), equalTo(".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); - assertThat(result.get(2), equalTo("name2")); - assertThat(result.get(3), equalTo(".logstash-" + formatDate("uuuu.MM", dateFromMillis(context.getStartTime()).withDayOfMonth(1)))); + public void testExpression_EscapeDateFormat() { + String result = DateMathExpressionResolver.resolveExpression("<.marvel-{now/d{'\\{year\\}'yyyy}}>", getTime); + assertThat(result, equalTo(".marvel-" + formatDate("'{year}'yyyy", dateFromMillis(now)))); } - public void testExpression_CustomTimeZoneInIndexName() throws Exception { + public void testExpression_CustomTimeZoneInIndexName() { ZoneId timeZone; int hoursOffset; int minutesOffset = 0; @@ -194,57 +121,57 @@ public void testExpression_CustomTimeZoneInIndexName() throws Exception { // rounding to today 00:00 now = ZonedDateTime.now(ZoneOffset.UTC).withHour(0).withMinute(0).withSecond(0); } - Context context = new Context( - this.context.getState(), - this.context.getOptions(), - now.toInstant().toEpochMilli(), - SystemIndexAccessLevel.NONE, - name -> false, - name -> false - ); - List results = DateMathExpressionResolver.resolve( - context, - Arrays.asList("<.marvel-{now/d{yyyy.MM.dd|" + timeZone.getId() + "}}>") + + String result = DateMathExpressionResolver.resolveExpression( + "<.marvel-{now/d{yyyy.MM.dd|" + timeZone.getId() + "}}>", + () -> now.toInstant().toEpochMilli() ); - assertThat(results.size(), equalTo(1)); - logger.info("timezone: [{}], now [{}], name: [{}]", timeZone, now, results.get(0)); - assertThat(results.get(0), equalTo(".marvel-" + formatDate("uuuu.MM.dd", now.withZoneSameInstant(timeZone)))); + logger.info("timezone: [{}], now [{}], name: [{}]", timeZone, now, result); + assertThat(result, equalTo(".marvel-" + formatDate("uuuu.MM.dd", now.withZoneSameInstant(timeZone)))); } - public void testExpressionInvalidUnescaped() throws Exception { + public void 
testExpressionInvalidUnescaped() { Exception e = expectThrows( ElasticsearchParseException.class, - () -> DateMathExpressionResolver.resolve(context, Arrays.asList("<.mar}vel-{now/d}>")) + () -> DateMathExpressionResolver.resolveExpression("<.mar}vel-{now/d}>", getTime) ); assertThat(e.getMessage(), containsString("invalid dynamic name expression")); assertThat(e.getMessage(), containsString("invalid character at position [")); } - public void testExpressionInvalidDateMathFormat() throws Exception { + public void testExpressionInvalidDateMathFormat() { Exception e = expectThrows( ElasticsearchParseException.class, - () -> DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}>")) + () -> DateMathExpressionResolver.resolveExpression("<.marvel-{now/d{}>", getTime) ); assertThat(e.getMessage(), containsString("invalid dynamic name expression")); assertThat(e.getMessage(), containsString("date math placeholder is open ended")); } - public void testExpressionInvalidEmptyDateMathFormat() throws Exception { + public void testExpressionInvalidEmptyDateMathFormat() { Exception e = expectThrows( ElasticsearchParseException.class, - () -> DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}}>")) + () -> DateMathExpressionResolver.resolveExpression("<.marvel-{now/d{}}>", getTime) ); assertThat(e.getMessage(), containsString("invalid dynamic name expression")); assertThat(e.getMessage(), containsString("missing date format")); } - public void testExpressionInvalidOpenEnded() throws Exception { + public void testExpressionInvalidOpenEnded() { Exception e = expectThrows( ElasticsearchParseException.class, - () -> DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d>")) + () -> DateMathExpressionResolver.resolveExpression("<.marvel-{now/d>", getTime) ); assertThat(e.getMessage(), containsString("invalid dynamic name expression")); assertThat(e.getMessage(), containsString("date math placeholder is open ended")); } + static ZonedDateTime dateFromMillis(long millis) { + return ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneOffset.UTC); + } + + static String formatDate(String pattern, ZonedDateTime zonedDateTime) { + DateTimeFormatter dateFormatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT); + return dateFormatter.format(zonedDateTime); + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodeTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodeTests.java index 41651d52ceb9f..0e4b8271ceac7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodeTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodeTests.java @@ -185,38 +185,6 @@ public void testNodeCPUsRoundUp() { } } - public void testDesiredNodeHasRangeFloatProcessors() { - final var settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), randomAlphaOfLength(10)).build(); - - { - final var desiredNode = new DesiredNode( - settings, - new DesiredNode.ProcessorsRange(0.4, 1.2), - ByteSizeValue.ofGb(1), - ByteSizeValue.ofGb(1) - ); - assertThat(desiredNode.clusterHasRequiredFeatures(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORTED::equals), is(true)); - assertThat(desiredNode.clusterHasRequiredFeatures(nf -> false), is(false)); - } - - { - final var desiredNode = new DesiredNode( - settings, - randomIntBetween(0, 10) + randomDoubleBetween(0.00001, 0.99999, true), - ByteSizeValue.ofGb(1), - ByteSizeValue.ofGb(1) - ); - 
assertThat(desiredNode.clusterHasRequiredFeatures(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORTED::equals), is(true)); - assertThat(desiredNode.clusterHasRequiredFeatures(nf -> false), is(false)); - } - - { - final var desiredNode = new DesiredNode(settings, 2.0f, ByteSizeValue.ofGb(1), ByteSizeValue.ofGb(1)); - assertThat(desiredNode.clusterHasRequiredFeatures(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORTED::equals), is(true)); - assertThat(desiredNode.clusterHasRequiredFeatures(nf -> false), is(true)); - } - } - public void testEqualsOrProcessorsCloseTo() { final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), randomAlphaOfLength(10)).build(); final double maxDelta = 1E-3; diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index 99470918ce063..30895767c33c2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.Predicates; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; @@ -47,6 +48,7 @@ import java.time.LocalDate; import java.time.ZoneOffset; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -58,6 +60,8 @@ import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.createBackingIndex; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.createFailureStore; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance; +import static org.elasticsearch.cluster.metadata.DateMathExpressionResolverTests.dateFromMillis; +import static org.elasticsearch.cluster.metadata.DateMathExpressionResolverTests.formatDate; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_HIDDEN_SETTING; import static org.elasticsearch.common.util.set.Sets.newHashSet; import static org.elasticsearch.indices.SystemIndices.EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY; @@ -885,10 +889,7 @@ public void testConcreteIndicesIgnoreIndicesEmptyRequest() { IndicesOptions.lenientExpandOpen(), SystemIndexAccessLevel.NONE ); - assertThat( - newHashSet(indexNameExpressionResolver.concreteIndexNames(context, new String[] {})), - equalTo(newHashSet("kuku", "testXXX")) - ); + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context)), equalTo(newHashSet("kuku", "testXXX"))); } public void testConcreteIndicesNoIndicesErrorMessage() { @@ -1408,52 +1409,56 @@ public void testConcreteIndicesWildcardNoMatch() { } } - public void testIsAllIndicesNull() throws Exception { + public void testIsAllIndicesNull() { assertThat(IndexNameExpressionResolver.isAllIndices(null), equalTo(true)); } - public void testIsAllIndicesEmpty() throws Exception { - assertThat(IndexNameExpressionResolver.isAllIndices(Collections.emptyList()), equalTo(true)); + public void testIsAllIndicesEmpty() { + assertThat(IndexNameExpressionResolver.isAllIndices(List.of()), equalTo(true)); + } + + public void testIsAllIndicesExplicitAll() { + 
assertThat(IndexNameExpressionResolver.isAllIndices(List.of("_all")), equalTo(true)); } - public void testIsAllIndicesExplicitAll() throws Exception { - assertThat(IndexNameExpressionResolver.isAllIndices(Arrays.asList("_all")), equalTo(true)); + public void testIsAllIndicesExplicitAllPlusOther() { + assertThat(IndexNameExpressionResolver.isAllIndices(List.of("_all", "other")), equalTo(false)); } - public void testIsAllIndicesExplicitAllPlusOther() throws Exception { - assertThat(IndexNameExpressionResolver.isAllIndices(Arrays.asList("_all", "other")), equalTo(false)); + public void testIsNoneIndices() { + assertThat(IndexNameExpressionResolver.isNoneExpression(new String[] { "*", "-*" }), equalTo(true)); } - public void testIsAllIndicesNormalIndexes() throws Exception { - assertThat(IndexNameExpressionResolver.isAllIndices(Arrays.asList("index1", "index2", "index3")), equalTo(false)); + public void testIsAllIndicesNormalIndexes() { + assertThat(IndexNameExpressionResolver.isAllIndices(List.of("index1", "index2", "index3")), equalTo(false)); } - public void testIsAllIndicesWildcard() throws Exception { - assertThat(IndexNameExpressionResolver.isAllIndices(Arrays.asList("*")), equalTo(false)); + public void testIsAllIndicesWildcard() { + assertThat(IndexNameExpressionResolver.isAllIndices(List.of("*")), equalTo(false)); } - public void testIsExplicitAllIndicesNull() throws Exception { + public void testIsExplicitAllIndicesNull() { assertThat(IndexNameExpressionResolver.isExplicitAllPattern(null), equalTo(false)); } - public void testIsExplicitAllIndicesEmpty() throws Exception { - assertThat(IndexNameExpressionResolver.isExplicitAllPattern(Collections.emptyList()), equalTo(false)); + public void testIsExplicitAllIndicesEmpty() { + assertThat(IndexNameExpressionResolver.isExplicitAllPattern(List.of()), equalTo(false)); } - public void testIsExplicitAllIndicesExplicitAll() throws Exception { - assertThat(IndexNameExpressionResolver.isExplicitAllPattern(Arrays.asList("_all")), equalTo(true)); + public void testIsExplicitAllIndicesExplicitAll() { + assertThat(IndexNameExpressionResolver.isExplicitAllPattern(List.of("_all")), equalTo(true)); } - public void testIsExplicitAllIndicesExplicitAllPlusOther() throws Exception { - assertThat(IndexNameExpressionResolver.isExplicitAllPattern(Arrays.asList("_all", "other")), equalTo(false)); + public void testIsExplicitAllIndicesExplicitAllPlusOther() { + assertThat(IndexNameExpressionResolver.isExplicitAllPattern(List.of("_all", "other")), equalTo(false)); } - public void testIsExplicitAllIndicesNormalIndexes() throws Exception { - assertThat(IndexNameExpressionResolver.isExplicitAllPattern(Arrays.asList("index1", "index2", "index3")), equalTo(false)); + public void testIsExplicitAllIndicesNormalIndexes() { + assertThat(IndexNameExpressionResolver.isExplicitAllPattern(List.of("index1", "index2", "index3")), equalTo(false)); } - public void testIsExplicitAllIndicesWildcard() throws Exception { - assertThat(IndexNameExpressionResolver.isExplicitAllPattern(Arrays.asList("*")), equalTo(false)); + public void testIsExplicitAllIndicesWildcard() { + assertThat(IndexNameExpressionResolver.isExplicitAllPattern(List.of("*")), equalTo(false)); } public void testIndexOptionsFailClosedIndicesAndAliases() { @@ -1580,16 +1585,13 @@ public void testResolveExpressions() { .put(indexBuilder("test-1").state(State.OPEN).putAlias(AliasMetadata.builder("alias-1"))); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); - 
assertEquals(new HashSet<>(Arrays.asList("alias-0", "alias-1")), indexNameExpressionResolver.resolveExpressions(state, "alias-*")); + assertEquals(Set.of("alias-0", "alias-1"), indexNameExpressionResolver.resolveExpressions(state, "alias-*")); + assertEquals(Set.of("test-0", "alias-0", "alias-1"), indexNameExpressionResolver.resolveExpressions(state, "test-0", "alias-*")); assertEquals( - new HashSet<>(Arrays.asList("test-0", "alias-0", "alias-1")), - indexNameExpressionResolver.resolveExpressions(state, "test-0", "alias-*") - ); - assertEquals( - new HashSet<>(Arrays.asList("test-0", "test-1", "alias-0", "alias-1")), + Set.of("test-0", "test-1", "alias-0", "alias-1"), indexNameExpressionResolver.resolveExpressions(state, "test-*", "alias-*") ); - assertEquals(new HashSet<>(Arrays.asList("test-1", "alias-1")), indexNameExpressionResolver.resolveExpressions(state, "*-1")); + assertEquals(Set.of("test-1", "alias-1"), indexNameExpressionResolver.resolveExpressions(state, "*-1")); } public void testFilteringAliases() { @@ -1598,16 +1600,16 @@ public void testFilteringAliases() { .put(indexBuilder("test-1").state(State.OPEN).putAlias(AliasMetadata.builder("alias-1"))); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); - Set resolvedExpressions = new HashSet<>(Arrays.asList("alias-0", "alias-1")); + Set resolvedExpressions = Set.of("alias-0", "alias-1"); String[] strings = indexNameExpressionResolver.filteringAliases(state, "test-0", resolvedExpressions); assertArrayEquals(new String[] { "alias-0" }, strings); // concrete index supersedes filtering alias - resolvedExpressions = new HashSet<>(Arrays.asList("test-0", "alias-0", "alias-1")); + resolvedExpressions = Set.of("test-0", "alias-0", "alias-1"); strings = indexNameExpressionResolver.filteringAliases(state, "test-0", resolvedExpressions); assertNull(strings); - resolvedExpressions = new HashSet<>(Arrays.asList("test-0", "test-1", "alias-0", "alias-1")); + resolvedExpressions = Set.of("test-0", "test-1", "alias-0", "alias-1"); strings = indexNameExpressionResolver.filteringAliases(state, "test-0", resolvedExpressions); assertNull(strings); } @@ -1742,7 +1744,7 @@ public void testIndexAliasesSkipIdentity() { ); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); - Set resolvedExpressions = new HashSet<>(Arrays.asList("test-0", "test-alias")); + Set resolvedExpressions = Set.of("test-0", "test-alias"); String[] aliases = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, x -> true, false, resolvedExpressions); assertNull(aliases); aliases = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, x -> true, true, resolvedExpressions); @@ -1769,7 +1771,7 @@ public void testConcreteWriteIndexSuccessful() { x -> true, x -> true, true, - new HashSet<>(Arrays.asList("test-0", "test-alias")) + Set.of("test-0", "test-alias") ); Arrays.sort(strings); assertArrayEquals(new String[] { "test-alias" }, strings); @@ -1851,7 +1853,7 @@ public void testConcreteWriteIndexWithWildcardExpansion() { x -> true, x -> true, true, - new HashSet<>(Arrays.asList("test-0", "test-1", "test-alias")) + Set.of("test-0", "test-1", "test-alias") ); Arrays.sort(strings); assertArrayEquals(new String[] { "test-alias" }, strings); @@ -1889,7 +1891,7 @@ public void testConcreteWriteIndexWithNoWriteIndexWithSingleIndex() { x -> true, x -> true, true, - new HashSet<>(Arrays.asList("test-0", "test-alias")) + Set.of("test-0", "test-alias") ); 
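The hunks above swap new HashSet<>(Arrays.asList(...)) for Set.of(...), which is behavior-preserving here because every literal set is fixed, non-null, and duplicate-free. Set.of is stricter than a HashSet copy, which is worth knowing before repeating the migration elsewhere; a small stand-alone illustration (plain JDK, no Elasticsearch types):

import java.util.Set;

class ImmutableSetMigrationSketch {
    public static void main(String[] args) {
        Set<String> resolved = Set.of("test-0", "alias-0", "alias-1");
        try {
            resolved.add("other"); // Set.of views are immutable: mutation fails fast
        } catch (UnsupportedOperationException expected) {
            System.out.println("immutable, as expected");
        }
        try {
            Set.of("alias-0", "alias-0"); // duplicates are rejected at construction time
        } catch (IllegalArgumentException expected) {
            System.out.println("duplicates rejected, as expected");
        }
        // Set.of also rejects null elements with a NullPointerException, whereas a
        // HashSet copy would have accepted them; none of the migrated tests rely on nulls.
    }
}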
Arrays.sort(strings); assertArrayEquals(new String[] { "test-alias" }, strings); @@ -1925,7 +1927,7 @@ public void testConcreteWriteIndexWithNoWriteIndexWithMultipleIndices() { x -> true, x -> true, true, - new HashSet<>(Arrays.asList("test-0", "test-1", "test-alias")) + Set.of("test-0", "test-1", "test-alias") ); Arrays.sort(strings); assertArrayEquals(new String[] { "test-alias" }, strings); @@ -1966,7 +1968,7 @@ public void testAliasResolutionNotAllowingMultipleIndices() { x -> true, x -> true, true, - new HashSet<>(Arrays.asList("test-0", "test-1", "test-alias")) + Set.of("test-0", "test-1", "test-alias") ); Arrays.sort(strings); assertArrayEquals(new String[] { "test-alias" }, strings); @@ -2328,40 +2330,40 @@ public void testFullWildcardSystemIndexResolutionWithExpandHiddenAllowed() { SearchRequest request = new SearchRequest(randomFrom("*", "_all")); request.indicesOptions(IndicesOptions.strictExpandHidden()); - List indexNames = resolveConcreteIndexNameList(state, request); - assertThat(indexNames, containsInAnyOrder("some-other-index", ".ml-stuff", ".ml-meta", ".watches")); + String[] indexNames = indexNameExpressionResolver.concreteIndexNames(state, request); + assertThat(indexNames, arrayContainingInAnyOrder("some-other-index", ".ml-stuff", ".ml-meta", ".watches")); } public void testWildcardSystemIndexResolutionMultipleMatchesAllowed() { ClusterState state = systemIndexTestClusterState(); SearchRequest request = new SearchRequest(".w*"); - List indexNames = resolveConcreteIndexNameList(state, request); - assertThat(indexNames, containsInAnyOrder(".watches")); + String[] indexNames = indexNameExpressionResolver.concreteIndexNames(state, request); + assertThat(indexNames, arrayContainingInAnyOrder(".watches")); } public void testWildcardSystemIndexResolutionSingleMatchAllowed() { ClusterState state = systemIndexTestClusterState(); SearchRequest request = new SearchRequest(".ml-*"); - List indexNames = resolveConcreteIndexNameList(state, request); - assertThat(indexNames, containsInAnyOrder(".ml-meta", ".ml-stuff")); + String[] indexNames = indexNameExpressionResolver.concreteIndexNames(state, request); + assertThat(indexNames, arrayContainingInAnyOrder(".ml-meta", ".ml-stuff")); } public void testSingleSystemIndexResolutionAllowed() { ClusterState state = systemIndexTestClusterState(); SearchRequest request = new SearchRequest(".ml-meta"); - List indexNames = resolveConcreteIndexNameList(state, request); - assertThat(indexNames, containsInAnyOrder(".ml-meta")); + String[] indexNames = indexNameExpressionResolver.concreteIndexNames(state, request); + assertThat(indexNames, arrayContainingInAnyOrder(".ml-meta")); } public void testFullWildcardSystemIndicesAreHidden() { ClusterState state = systemIndexTestClusterState(); SearchRequest request = new SearchRequest(randomFrom("*", "_all")); - List indexNames = resolveConcreteIndexNameList(state, request); - assertThat(indexNames, containsInAnyOrder("some-other-index")); + String[] indexNames = indexNameExpressionResolver.concreteIndexNames(state, request); + assertThat(indexNames, arrayContaining("some-other-index")); } public void testFullWildcardSystemIndexResolutionDeprecated() { @@ -2370,8 +2372,8 @@ public void testFullWildcardSystemIndexResolutionDeprecated() { SearchRequest request = new SearchRequest(randomFrom("*", "_all")); request.indicesOptions(IndicesOptions.strictExpandHidden()); - List indexNames = resolveConcreteIndexNameList(state, request); - assertThat(indexNames, containsInAnyOrder("some-other-index", ".ml-stuff", 
".ml-meta", ".watches")); + String[] indexNames = indexNameExpressionResolver.concreteIndexNames(state, request); + assertThat(indexNames, arrayContainingInAnyOrder("some-other-index", ".ml-stuff", ".ml-meta", ".watches")); assertWarnings( true, new DeprecationWarning( @@ -2388,8 +2390,8 @@ public void testSingleSystemIndexResolutionDeprecated() { ClusterState state = systemIndexTestClusterState(); SearchRequest request = new SearchRequest(".ml-meta"); - List indexNames = resolveConcreteIndexNameList(state, request); - assertThat(indexNames, containsInAnyOrder(".ml-meta")); + String[] indexNames = indexNameExpressionResolver.concreteIndexNames(state, request); + assertThat(indexNames, arrayContaining(".ml-meta")); assertWarnings( true, new DeprecationWarning( @@ -2405,8 +2407,8 @@ public void testWildcardSystemIndexResolutionSingleMatchDeprecated() { ClusterState state = systemIndexTestClusterState(); SearchRequest request = new SearchRequest(".w*"); - List indexNames = resolveConcreteIndexNameList(state, request); - assertThat(indexNames, containsInAnyOrder(".watches")); + String[] indexNames = indexNameExpressionResolver.concreteIndexNames(state, request); + assertThat(indexNames, arrayContainingInAnyOrder(".watches")); assertWarnings( true, new DeprecationWarning( @@ -2423,8 +2425,8 @@ public void testWildcardSystemIndexResolutionMultipleMatchesDeprecated() { ClusterState state = systemIndexTestClusterState(); SearchRequest request = new SearchRequest(".ml-*"); - List indexNames = resolveConcreteIndexNameList(state, request); - assertThat(indexNames, containsInAnyOrder(".ml-meta", ".ml-stuff")); + String[] indexNames = indexNameExpressionResolver.concreteIndexNames(state, request); + assertThat(indexNames, arrayContainingInAnyOrder(".ml-meta", ".ml-stuff")); assertWarnings( true, new DeprecationWarning( @@ -2479,8 +2481,8 @@ public void testExternalSystemIndexAccess() { threadContext.putHeader(SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY, Boolean.FALSE.toString()); SearchRequest request = new SearchRequest(".external-*"); - List indexNames = resolveConcreteIndexNameList(state, request); - assertThat(indexNames, contains(".external-sys-idx")); + String[] indexNames = indexNameExpressionResolver.concreteIndexNames(state, request); + assertThat(indexNames, arrayContaining(".external-sys-idx")); assertWarnings( true, new DeprecationWarning( @@ -2496,8 +2498,8 @@ public void testExternalSystemIndexAccess() { threadContext.putHeader(SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY, Boolean.FALSE.toString()); SearchRequest request = new SearchRequest(".external-sys-idx"); - List indexNames = resolveConcreteIndexNameList(state, request); - assertThat(indexNames, contains(".external-sys-idx")); + String[] indexNames = indexNameExpressionResolver.concreteIndexNames(state, request); + assertThat(indexNames, arrayContaining(".external-sys-idx")); assertWarnings( true, new DeprecationWarning( @@ -2515,8 +2517,8 @@ public void testExternalSystemIndexAccess() { threadContext.putHeader(EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY, "stack-component"); SearchRequest request = new SearchRequest(".external-*"); - List indexNames = resolveConcreteIndexNameList(state, request); - assertThat(indexNames, contains(".external-sys-idx")); + String[] indexNames = indexNameExpressionResolver.concreteIndexNames(state, request); + assertThat(indexNames, arrayContaining(".external-sys-idx")); assertWarnings(); } } @@ -2526,8 +2528,8 @@ public void testExternalSystemIndexAccess() { 
             threadContext.putHeader(EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY, "stack-component");
             SearchRequest request = new SearchRequest(".external-sys-idx");
-            List<String> indexNames = resolveConcreteIndexNameList(state, request);
-            assertThat(indexNames, contains(".external-sys-idx"));
+            String[] indexNames = indexNameExpressionResolver.concreteIndexNames(state, request);
+            assertThat(indexNames, arrayContaining(".external-sys-idx"));
             assertWarnings();
         }
     }
@@ -2538,8 +2540,8 @@ public void testExternalSystemIndexAccess() {
             threadContext.putHeader(EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY, "other");
             SearchRequest request = new SearchRequest(".external-*");
-            List<String> indexNames = resolveConcreteIndexNameList(state, request);
-            assertThat(indexNames, contains(".external-sys-idx"));
+            String[] indexNames = indexNameExpressionResolver.concreteIndexNames(state, request);
+            assertThat(indexNames, arrayContaining(".external-sys-idx"));
             assertWarnings();
         }
     }
@@ -2549,8 +2551,8 @@ public void testExternalSystemIndexAccess() {
             threadContext.putHeader(EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY, "other");
             SearchRequest request = new SearchRequest(".external-sys-idx");
-            List<String> indexNames = resolveConcreteIndexNameList(state, request);
-            assertThat(indexNames, contains(".external-sys-idx"));
+            String[] indexNames = indexNameExpressionResolver.concreteIndexNames(state, request);
+            assertThat(indexNames, arrayContaining(".external-sys-idx"));
             assertWarnings();
         }
     }
@@ -3073,7 +3075,6 @@ public void testDataStreamsWithWildcardExpression() {
             assertThat(result[1].getName(), equalTo(DataStream.getDefaultBackingIndexName(dataStream1, 2, epochMillis)));
             assertThat(result[2].getName(), equalTo(DataStream.getDefaultBackingIndexName(dataStream2, 1, epochMillis)));
             assertThat(result[3].getName(), equalTo(DataStream.getDefaultBackingIndexName(dataStream2, 2, epochMillis)));
-            ;
         }
         {
             IndicesOptions indicesOptions = IndicesOptions.STRICT_EXPAND_OPEN;
@@ -3239,6 +3240,37 @@ public void testDataStreamsNames() {
         assertThat(names, empty());
     }

+    public void testDateMathMixedArray() {
+        long now = System.currentTimeMillis();
+        String dateMathIndex1 = ".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(now));
+        String dateMathIndex2 = ".logstash-" + formatDate("uuuu.MM", dateFromMillis(now).withDayOfMonth(1));
+        IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(
+            ClusterState.builder(new ClusterName("_name"))
+                .metadata(
+                    Metadata.builder()
+                        .put(indexBuilder("name1"))
+                        .put(indexBuilder("name2"))
+                        .put(indexBuilder(dateMathIndex1))
+                        .put(indexBuilder(dateMathIndex2))
+                )
+                .build(),
+            IndicesOptions.strictExpand(),
+            now,
+            SystemIndexAccessLevel.NONE,
+            Predicates.never(),
+            Predicates.never()
+        );
+        Collection<String> result = IndexNameExpressionResolver.resolveExpressionsToResources(
+            context,
+            "name1",
+            "<.marvel-{now/d}>",
+            "name2",
+            "<.logstash-{now/M{uuuu.MM}}>"
+        );
+        assertThat(result.size(), equalTo(4));
+        assertThat(result, contains("name1", dateMathIndex1, "name2", dateMathIndex2));
+    }
+
     public void testMathExpressionSupport() {
         Instant instant = LocalDate.of(2021, 01, 11).atStartOfDay().toInstant(ZoneOffset.UTC);
         String resolved = IndexNameExpressionResolver.resolveDateMathExpression("", instant.toEpochMilli());
@@ -3418,10 +3450,6 @@ private ClusterState systemIndexTestClusterState() {
         return ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build();
     }

-    private List<String> resolveConcreteIndexNameList(ClusterState state, SearchRequest request) {
-        return
Arrays.stream(indexNameExpressionResolver.concreteIndices(state, request)).map(Index::getName).toList(); - } - private static IndexMetadata.Builder indexBuilder(String index, Settings additionalSettings) { return IndexMetadata.builder(index).settings(indexSettings(IndexVersion.current(), 1, 0).put(additionalSettings)); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 96a74d2e23aad..1876a1f2da556 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.IndexScopedSettings; @@ -66,6 +67,7 @@ import org.elasticsearch.snapshots.EmptySnapshotsInfoService; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.test.gateway.TestGatewayAllocator; import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.threadpool.TestThreadPool; @@ -105,6 +107,8 @@ import static org.elasticsearch.cluster.metadata.MetadataCreateIndexService.resolveAndValidateAliases; import static org.elasticsearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; import static org.elasticsearch.indices.ShardLimitValidatorTests.createTestShardLimitService; +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; @@ -1133,7 +1137,7 @@ public void testClusterStateCreateIndexThrowsWriteIndexValidationException() thr assertThat( expectThrows( IllegalStateException.class, - () -> clusterStateCreateIndex(currentClusterState, newIndex, null, TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY) + () -> clusterStateCreateIndex(currentClusterState, newIndex, null, null, TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY) ).getMessage(), startsWith("alias [alias1] has more than one write index [") ); @@ -1153,6 +1157,7 @@ public void testClusterStateCreateIndex() { currentClusterState, newIndexMetadata, null, + null, TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY ); assertThat(updatedClusterState.blocks().getIndexBlockWithId("test", INDEX_READ_ONLY_BLOCK.id()), is(INDEX_READ_ONLY_BLOCK)); @@ -1198,6 +1203,7 @@ public void testClusterStateCreateIndexWithMetadataTransaction() { currentClusterState, newIndexMetadata, metadataTransformer, + null, TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY ); assertTrue(updatedClusterState.metadata().findAllAliases(new String[] { "my-index" }).containsKey("my-index")); @@ -1547,6 +1553,84 @@ public void testDeprecateSimpleFS() { ); } + public void testClusterStateCreateIndexWithClusterBlockTransformer() { + { + var emptyClusterState = ClusterState.builder(ClusterState.EMPTY_STATE).build(); + var updatedClusterState = clusterStateCreateIndex( + 
emptyClusterState, + IndexMetadata.builder("test") + .settings(settings(IndexVersion.current())) + .numberOfShards(1) + .numberOfReplicas(randomIntBetween(1, 3)) + .build(), + null, + MetadataCreateIndexService.createClusterBlocksTransformerForIndexCreation(Settings.EMPTY), + TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY + ); + assertThat(updatedClusterState.blocks().indices(), is(anEmptyMap())); + assertThat(updatedClusterState.blocks().hasIndexBlock("test", IndexMetadata.INDEX_REFRESH_BLOCK), is(false)); + assertThat(updatedClusterState.routingTable().index("test"), is(notNullValue())); + } + { + var minTransportVersion = TransportVersionUtils.randomCompatibleVersion(random()); + var emptyClusterState = ClusterState.builder(ClusterState.EMPTY_STATE) + .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("_node_id")).build()) + .putCompatibilityVersions("_node_id", new CompatibilityVersions(minTransportVersion, Map.of())) + .build(); + var settings = Settings.builder() + .put(DiscoveryNode.STATELESS_ENABLED_SETTING_NAME, true) + .put(MetadataCreateIndexService.USE_INDEX_REFRESH_BLOCK_SETTING_NAME, true) + .build(); + int nbReplicas = randomIntBetween(0, 1); + var updatedClusterState = clusterStateCreateIndex( + emptyClusterState, + IndexMetadata.builder("test") + .settings(settings(IndexVersion.current())) + .numberOfShards(1) + .numberOfReplicas(nbReplicas) + .build() + .withTimestampRanges(IndexLongFieldRange.UNKNOWN, IndexLongFieldRange.UNKNOWN, minTransportVersion), + null, + MetadataCreateIndexService.createClusterBlocksTransformerForIndexCreation(settings), + TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY + ); + + var expectRefreshBlock = 0 < nbReplicas && minTransportVersion.onOrAfter(TransportVersions.NEW_REFRESH_CLUSTER_BLOCK); + assertThat(updatedClusterState.blocks().indices(), is(aMapWithSize(expectRefreshBlock ? 
1 : 0))); + assertThat(updatedClusterState.blocks().hasIndexBlock("test", IndexMetadata.INDEX_REFRESH_BLOCK), is(expectRefreshBlock)); + assertThat(updatedClusterState.routingTable().index("test"), is(notNullValue())); + } + } + + public void testCreateClusterBlocksTransformerForIndexCreation() { + boolean isStateless = randomBoolean(); + boolean useRefreshBlock = randomBoolean(); + var minTransportVersion = TransportVersionUtils.randomCompatibleVersion(random()); + + var applier = MetadataCreateIndexService.createClusterBlocksTransformerForIndexCreation( + Settings.builder() + .put(DiscoveryNode.STATELESS_ENABLED_SETTING_NAME, isStateless) + .put(MetadataCreateIndexService.USE_INDEX_REFRESH_BLOCK_SETTING_NAME, useRefreshBlock) + .build() + ); + assertThat(applier, notNullValue()); + + var blocks = ClusterBlocks.builder().blocks(ClusterState.EMPTY_STATE.blocks()); + applier.apply( + blocks, + IndexMetadata.builder("test") + .settings(settings(IndexVersion.current())) + .numberOfShards(1) + .numberOfReplicas(randomIntBetween(1, 3)) + .build(), + minTransportVersion + ); + assertThat( + blocks.hasIndexBlock("test", IndexMetadata.INDEX_REFRESH_BLOCK), + is(isStateless && useRefreshBlock && minTransportVersion.onOrAfter(TransportVersions.NEW_REFRESH_CLUSTER_BLOCK)) + ); + } + private IndexTemplateMetadata addMatchingTemplate(Consumer configurator) { IndexTemplateMetadata.Builder builder = templateMetadataBuilder("template1", "te*"); configurator.accept(builder); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java index 982394ca31b1c..6a26e7948784c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java @@ -13,23 +13,20 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata.State; -import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.SystemIndices.SystemIndexAccessLevel; import org.elasticsearch.test.ESTestCase; -import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.List; +import java.util.Set; import java.util.function.Predicate; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.createBackingIndex; import static org.elasticsearch.common.util.set.Sets.newHashSet; import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; public class WildcardExpressionResolverTests extends ESTestCase { @@ -50,107 +47,31 @@ public void testConvertWildcardsJustIndicesTests() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testXXX"))), - equalTo(newHashSet("testXXX")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(context, "ku*")), + equalTo(newHashSet("kuku")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testXXX", "testYYY"))), - equalTo(newHashSet("testXXX", "testYYY")) - ); - 
assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testXXX", "ku*"))), - equalTo(newHashSet("testXXX", "kuku")) - ); - assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("test*"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(context, "test*")), equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testX*"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(context, "testX*")), equalTo(newHashSet("testXXX", "testXYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testX*", "kuku"))), - equalTo(newHashSet("testXXX", "testXYY", "kuku")) - ); - assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("*"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(context, "*")), equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku")) ); - assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("*", "-kuku"))), - equalTo(newHashSet("testXXX", "testXYY", "testYYY")) - ); - assertThat( - newHashSet( - IndexNameExpressionResolver.WildcardExpressionResolver.resolve( - context, - Arrays.asList("testX*", "-doe", "-testXXX", "-testYYY") - ) - ), - equalTo(newHashSet("testXYY")) - ); - if (indicesOptions == IndicesOptions.lenientExpandOpen()) { - assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testXXX", "-testXXX"))), - equalTo(newHashSet("testXXX", "-testXXX")) - ); - } else if (indicesOptions == IndicesOptions.strictExpandOpen()) { - IndexNotFoundException infe = expectThrows( - IndexNotFoundException.class, - () -> IndexNameExpressionResolver.resolveExpressions(context, "testXXX", "-testXXX") - ); - assertEquals("-testXXX", infe.getIndex().getName()); - } - assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testXXX", "-testX*"))), - equalTo(newHashSet("testXXX")) - ); - } - - public void testConvertWildcardsTests() { - Metadata.Builder mdBuilder = Metadata.builder() - .put(indexBuilder("testXXX").putAlias(AliasMetadata.builder("alias1")).putAlias(AliasMetadata.builder("alias2"))) - .put(indexBuilder("testXYY").putAlias(AliasMetadata.builder("alias2"))) - .put(indexBuilder("testYYY").putAlias(AliasMetadata.builder("alias3"))) - .put(indexBuilder("kuku")); - ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); - - IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context( - state, - IndicesOptions.lenientExpandOpen(), - SystemIndexAccessLevel.NONE - ); - assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testYY*", "alias*"))), - equalTo(newHashSet("testXXX", "testXYY", "testYYY")) - ); - assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("-kuku"))), - equalTo(newHashSet("-kuku")) - ); - assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, 
Arrays.asList("test*", "-testYYY"))), - equalTo(newHashSet("testXXX", "testXYY")) - ); - assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testX*", "testYYY"))), - equalTo(newHashSet("testXXX", "testXYY", "testYYY")) - ); - assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testYYY", "testX*"))), - equalTo(newHashSet("testXXX", "testXYY", "testYYY")) - ); } public void testConvertWildcardsOpenClosedIndicesTests() { Metadata.Builder mdBuilder = Metadata.builder() - .put(indexBuilder("testXXX").state(IndexMetadata.State.OPEN)) - .put(indexBuilder("testXXY").state(IndexMetadata.State.OPEN)) - .put(indexBuilder("testXYY").state(IndexMetadata.State.CLOSE)) - .put(indexBuilder("testYYY").state(IndexMetadata.State.OPEN)) - .put(indexBuilder("testYYX").state(IndexMetadata.State.CLOSE)) - .put(indexBuilder("kuku").state(IndexMetadata.State.OPEN)); + .put(indexBuilder("testXXX").state(State.OPEN)) + .put(indexBuilder("testXXY").state(State.OPEN)) + .put(indexBuilder("testXYY").state(State.CLOSE)) + .put(indexBuilder("testYYY").state(State.OPEN)) + .put(indexBuilder("testYYX").state(State.CLOSE)) + .put(indexBuilder("kuku").state(State.OPEN)); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context( @@ -159,7 +80,7 @@ public void testConvertWildcardsOpenClosedIndicesTests() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testX*"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY", "testXYY")) ); context = new IndexNameExpressionResolver.Context( @@ -168,7 +89,7 @@ public void testConvertWildcardsOpenClosedIndicesTests() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testX*"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(context, "testX*")), equalTo(newHashSet("testXYY")) ); context = new IndexNameExpressionResolver.Context( @@ -177,26 +98,9 @@ public void testConvertWildcardsOpenClosedIndicesTests() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testX*"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY")) ); - context = new IndexNameExpressionResolver.Context( - state, - IndicesOptions.fromOptions(true, true, false, false), - SystemIndexAccessLevel.NONE - ); - assertThat(IndexNameExpressionResolver.resolveExpressions(context, "testX*").size(), equalTo(0)); - context = new IndexNameExpressionResolver.Context( - state, - IndicesOptions.fromOptions(false, true, false, false), - SystemIndexAccessLevel.NONE - ); - IndexNameExpressionResolver.Context finalContext = context; - IndexNotFoundException infe = expectThrows( - IndexNotFoundException.class, - () -> IndexNameExpressionResolver.resolveExpressions(finalContext, "testX*") - ); - assertThat(infe.getIndex().getName(), is("testX*")); } // issue #13334 @@ -217,28 +121,27 @@ public void testMultipleWildcards() { 
SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("test*X*"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(context, "test*X*")), equalTo(newHashSet("testXXX", "testXXY", "testXYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("test*X*Y"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(context, "test*X*Y")), equalTo(newHashSet("testXXY", "testXYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("kuku*Y*"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(context, "kuku*Y*")), equalTo(newHashSet("kukuYYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("*Y*"))), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(context, "*Y*")), equalTo(newHashSet("testXXY", "testXYY", "testYYY", "kukuYYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("test*Y*X"))) - .size(), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(context, "test*Y*X")).size(), equalTo(0) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("*Y*X"))).size(), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(context, "*Y*X")).size(), equalTo(0) ); } @@ -259,26 +162,6 @@ public void testAll() { newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolveAll(context)), equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); - assertThat( - newHashSet(IndexNameExpressionResolver.resolveExpressions(context, "_all")), - equalTo(newHashSet("testXXX", "testXYY", "testYYY")) - ); - IndicesOptions noExpandOptions = IndicesOptions.fromOptions( - randomBoolean(), - true, - false, - false, - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean() - ); - IndexNameExpressionResolver.Context noExpandContext = new IndexNameExpressionResolver.Context( - state, - noExpandOptions, - SystemIndexAccessLevel.NONE - ); - assertThat(IndexNameExpressionResolver.resolveExpressions(noExpandContext, "_all").size(), equalTo(0)); } public void testAllAliases() { @@ -506,112 +389,47 @@ public void testResolveAliases() { ); { - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources( indicesAndAliasesContext, - Collections.singletonList("foo_a*") + "foo_a*" ); assertThat(indices, containsInAnyOrder("foo_index", "bar_index")); } { - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources( skipAliasesLenientContext, - Collections.singletonList("foo_a*") + "foo_a*" ); assertEquals(0, indices.size()); } { - IndexNotFoundException infe = expectThrows( - IndexNotFoundException.class, - () -> IndexNameExpressionResolver.WildcardExpressionResolver.resolve( - skipAliasesStrictContext, - Collections.singletonList("foo_a*") - ) 
+ Set indices = IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources( + skipAliasesStrictContext, + "foo_a*" ); - assertEquals("foo_a*", infe.getIndex().getName()); + assertThat(indices, empty()); } { - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources( indicesAndAliasesContext, - Collections.singletonList("foo*") + "foo*" ); assertThat(indices, containsInAnyOrder("foo_foo", "foo_index", "bar_index")); } { - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources( skipAliasesLenientContext, - Collections.singletonList("foo*") + "foo*" ); assertThat(indices, containsInAnyOrder("foo_foo", "foo_index")); } { - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources( skipAliasesStrictContext, - Collections.singletonList("foo*") + "foo*" ); assertThat(indices, containsInAnyOrder("foo_foo", "foo_index")); } - { - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( - indicesAndAliasesContext, - Collections.singletonList("foo_alias") - ); - assertThat(indices, containsInAnyOrder("foo_alias")); - } - { - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( - skipAliasesLenientContext, - Collections.singletonList("foo_alias") - ); - assertThat(indices, containsInAnyOrder("foo_alias")); - } - { - IllegalArgumentException iae = expectThrows( - IllegalArgumentException.class, - () -> IndexNameExpressionResolver.resolveExpressions(skipAliasesStrictContext, "foo_alias") - ); - assertEquals( - "The provided expression [foo_alias] matches an alias, specify the corresponding concrete indices instead.", - iae.getMessage() - ); - } - IndicesOptions noExpandNoAliasesIndicesOptions = IndicesOptions.fromOptions(true, false, false, false, true, false, true, false); - IndexNameExpressionResolver.Context noExpandNoAliasesContext = new IndexNameExpressionResolver.Context( - state, - noExpandNoAliasesIndicesOptions, - SystemIndexAccessLevel.NONE - ); - { - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( - noExpandNoAliasesContext, - List.of("foo_alias") - ); - assertThat(indices, containsInAnyOrder("foo_alias")); - } - IndicesOptions strictNoExpandNoAliasesIndicesOptions = IndicesOptions.fromOptions( - false, - true, - false, - false, - true, - false, - true, - false - ); - IndexNameExpressionResolver.Context strictNoExpandNoAliasesContext = new IndexNameExpressionResolver.Context( - state, - strictNoExpandNoAliasesIndicesOptions, - SystemIndexAccessLevel.NONE - ); - { - IllegalArgumentException iae = expectThrows( - IllegalArgumentException.class, - () -> IndexNameExpressionResolver.resolveExpressions(strictNoExpandNoAliasesContext, "foo_alias") - ); - assertEquals( - "The provided expression [foo_alias] matches an alias, specify the corresponding concrete indices instead.", - iae.getMessage() - ); - } } public void testResolveDataStreams() { @@ -654,17 +472,14 @@ public void testResolveDataStreams() { ); // data streams are not included but expression matches the data stream - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = 
IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(
             indicesAndAliasesContext,
-            Collections.singletonList("foo_*")
+            "foo_*"
         );
         assertThat(indices, containsInAnyOrder("foo_index", "foo_foo", "bar_index"));

         // data streams are not included and expression doesn't match the data stream
-        indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(
-            indicesAndAliasesContext,
-            Collections.singletonList("bar_*")
-        );
+        indices = IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(indicesAndAliasesContext, "bar_*");
         assertThat(indices, containsInAnyOrder("bar_bar", "bar_index"));
     }
@@ -691,9 +506,9 @@ public void testResolveDataStreams() {
         );
         // data stream's corresponding backing indices are resolved
-        Collection<String> indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(
+        Collection<String> indices = IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(
             indicesAliasesAndDataStreamsContext,
-            Collections.singletonList("foo_*")
+            "foo_*"
         );
         assertThat(
             indices,
@@ -707,9 +522,9 @@ public void testResolveDataStreams() {
         );
         // include all wildcard adds the data stream's backing indices
-        indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(
+        indices = IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(
             indicesAliasesAndDataStreamsContext,
-            Collections.singletonList("*")
+            "*"
         );
         assertThat(
             indices,
@@ -748,9 +563,9 @@ public void testResolveDataStreams() {
         );
         // data stream's corresponding backing indices are resolved
-        Collection<String> indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(
+        Collection<String> indices = IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(
             indicesAliasesDataStreamsAndHiddenIndices,
-            Collections.singletonList("foo_*")
+            "foo_*"
         );
         assertThat(
             indices,
@@ -764,9 +579,9 @@ public void testResolveDataStreams() {
         );
         // include all wildcard adds the data stream's backing indices
-        indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(
+        indices = IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(
             indicesAliasesDataStreamsAndHiddenIndices,
-            Collections.singletonList("*")
+            "*"
         );
         assertThat(
             indices,
@@ -808,24 +623,17 @@ public void testMatchesConcreteIndicesWildcardAndAliases() {
             SystemIndexAccessLevel.NONE
         );
-        Collection<String> matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(indicesAndAliasesContext, List.of("*"));
+        Collection<String> matches = IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(
+            indicesAndAliasesContext,
+            "*"
+        );
         assertThat(matches, containsInAnyOrder("bar_bar", "foo_foo", "foo_index", "bar_index"));
-        matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(onlyIndicesContext, List.of("*"));
+        matches = IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(onlyIndicesContext, "*");
         assertThat(matches, containsInAnyOrder("bar_bar", "foo_foo", "foo_index", "bar_index"));
-        matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(indicesAndAliasesContext, List.of("foo*"));
+        matches = IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(indicesAndAliasesContext, "foo*");
         assertThat(matches, containsInAnyOrder("foo_foo", "foo_index", "bar_index"));
-        matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(onlyIndicesContext, List.of("foo*"));
+        matches =
IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(onlyIndicesContext, "foo*"); assertThat(matches, containsInAnyOrder("foo_foo", "foo_index")); - matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(indicesAndAliasesContext, List.of("foo_alias")); - assertThat(matches, containsInAnyOrder("foo_alias")); - IllegalArgumentException iae = expectThrows( - IllegalArgumentException.class, - () -> IndexNameExpressionResolver.resolveExpressions(onlyIndicesContext, "foo_alias") - ); - assertThat( - iae.getMessage(), - containsString("The provided expression [foo_alias] matches an alias, specify the corresponding concrete indices instead") - ); } private static IndexMetadata.Builder indexBuilder(String index, boolean hidden) { @@ -838,10 +646,6 @@ private static IndexMetadata.Builder indexBuilder(String index) { } private static void assertWildcardResolvesToEmpty(IndexNameExpressionResolver.Context context, String wildcardExpression) { - IndexNotFoundException infe = expectThrows( - IndexNotFoundException.class, - () -> IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, List.of(wildcardExpression)) - ); - assertEquals(wildcardExpression, infe.getIndex().getName()); + assertThat(IndexNameExpressionResolver.WildcardExpressionResolver.matchWildcardToResources(context, wildcardExpression), empty()); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/IndexRoutingTableTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/IndexRoutingTableTests.java index 21b30557cafea..912326162e5c4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/IndexRoutingTableTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/IndexRoutingTableTests.java @@ -27,15 +27,10 @@ public class IndexRoutingTableTests extends ESTestCase { public void testReadyForSearch() { - innerReadyForSearch(false); - innerReadyForSearch(true); - } - - private void innerReadyForSearch(boolean fastRefresh) { Index index = new Index(randomIdentifier(), UUIDs.randomBase64UUID()); ClusterState clusterState = mock(ClusterState.class, Mockito.RETURNS_DEEP_STUBS); when(clusterState.metadata().index(any(Index.class)).getSettings()).thenReturn( - Settings.builder().put(INDEX_FAST_REFRESH_SETTING.getKey(), fastRefresh).build() + Settings.builder().put(INDEX_FAST_REFRESH_SETTING.getKey(), randomBoolean()).build() ); // 2 primaries that are search and index ShardId p1 = new ShardId(index, 0); @@ -55,11 +50,7 @@ private void innerReadyForSearch(boolean fastRefresh) { shardTable1 = new IndexShardRoutingTable(p1, List.of(getShard(p1, true, ShardRoutingState.STARTED, ShardRouting.Role.INDEX_ONLY))); shardTable2 = new IndexShardRoutingTable(p2, List.of(getShard(p2, true, ShardRoutingState.STARTED, ShardRouting.Role.INDEX_ONLY))); indexRoutingTable = new IndexRoutingTable(index, new IndexShardRoutingTable[] { shardTable1, shardTable2 }); - if (fastRefresh) { - assertTrue(indexRoutingTable.readyForSearch(clusterState)); - } else { - assertFalse(indexRoutingTable.readyForSearch(clusterState)); - } + assertFalse(indexRoutingTable.readyForSearch(clusterState)); // 2 unassigned primaries that are index only shardTable1 = new IndexShardRoutingTable( @@ -91,11 +82,7 @@ private void innerReadyForSearch(boolean fastRefresh) { ) ); indexRoutingTable = new IndexRoutingTable(index, new IndexShardRoutingTable[] { shardTable1, shardTable2 }); - if (fastRefresh) { - assertTrue(indexRoutingTable.readyForSearch(clusterState)); - } else { - 
assertFalse(indexRoutingTable.readyForSearch(clusterState)); - } + assertFalse(indexRoutingTable.readyForSearch(clusterState)); // 2 primaries that are index only with some replicas that are all available shardTable1 = new IndexShardRoutingTable( @@ -118,8 +105,6 @@ private void innerReadyForSearch(boolean fastRefresh) { assertTrue(indexRoutingTable.readyForSearch(clusterState)); // 2 unassigned primaries that are index only with some replicas that are all available - // Fast refresh indices do not support replicas so this can not practically happen. If we add support we will want to ensure - // that readyForSearch allows for searching replicas when the index shard is not available. shardTable1 = new IndexShardRoutingTable( p1, List.of( @@ -137,11 +122,7 @@ private void innerReadyForSearch(boolean fastRefresh) { ) ); indexRoutingTable = new IndexRoutingTable(index, new IndexShardRoutingTable[] { shardTable1, shardTable2 }); - if (fastRefresh) { - assertFalse(indexRoutingTable.readyForSearch(clusterState)); // if we support replicas for fast refreshes this needs to change - } else { - assertTrue(indexRoutingTable.readyForSearch(clusterState)); - } + assertTrue(indexRoutingTable.readyForSearch(clusterState)); // 2 primaries that are index only with at least 1 replica per primary that is available shardTable1 = new IndexShardRoutingTable( diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java index 0efa576a0cddc..35f1780464659 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java @@ -84,7 +84,7 @@ public void testShardStats() { clusterService, () -> clusterInfo, createShardAllocator(), - new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER) + new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER, ClusterSettings.createBuiltInClusterSettings()) ); assertThat( service.stats(), @@ -125,7 +125,7 @@ public void testRelocatingShardIsOnlyCountedOnceOnTargetNode() { clusterService, EmptyClusterInfoService.INSTANCE, createShardAllocator(), - new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER) + new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER, ClusterSettings.createBuiltInClusterSettings()) ); assertThat( service.stats(), @@ -182,7 +182,7 @@ public DesiredBalance getDesiredBalance() { ); } }, - new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER) + new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER, ClusterSettings.createBuiltInClusterSettings()) ); assertThat( service.stats(), diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java index 98c3451329f52..412329e51a485 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java @@ -59,8 +59,8 @@ import static java.util.stream.Collectors.toSet; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; import static org.elasticsearch.cluster.routing.TestShardRouting.shardRoutingBuilder; -import static 
org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.Balancer.getIndexDiskUsageInBytes; import static org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.DISK_USAGE_BALANCE_FACTOR_SETTING; +import static org.elasticsearch.cluster.routing.allocation.allocator.WeightFunction.getIndexDiskUsageInBytes; import static org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider.SETTING_IGNORE_DISK_WATERMARKS; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java index b18e2c0cd2647..9d33b697e31ca 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionTestUtils; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -57,6 +58,7 @@ import org.elasticsearch.test.MockLog; import org.elasticsearch.threadpool.TestThreadPool; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Queue; @@ -79,7 +81,9 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.sameInstance; public class DesiredBalanceShardsAllocatorTests extends ESAllocationTestCase { @@ -916,6 +920,77 @@ public void resetDesiredBalance() { } } + public void testNotReconcileEagerlyForEmptyRoutingTable() { + final var threadPool = new TestThreadPool(getTestName()); + final var clusterService = ClusterServiceUtils.createClusterService(ClusterState.EMPTY_STATE, threadPool); + final var clusterSettings = createBuiltInClusterSettings(); + final var shardsAllocator = createShardsAllocator(); + final var reconciliationTaskSubmitted = new AtomicBoolean(); + final var desiredBalanceShardsAllocator = new DesiredBalanceShardsAllocator( + shardsAllocator, + threadPool, + clusterService, + new DesiredBalanceComputer(clusterSettings, TimeProviderUtils.create(() -> 1L), shardsAllocator) { + @Override + public DesiredBalance compute( + DesiredBalance previousDesiredBalance, + DesiredBalanceInput desiredBalanceInput, + Queue> pendingDesiredBalanceMoves, + Predicate isFresh + ) { + assertThat(previousDesiredBalance, sameInstance(DesiredBalance.INITIAL)); + return new DesiredBalance(desiredBalanceInput.index(), Map.of()); + } + }, + (clusterState, rerouteStrategy) -> null, + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS + ) { + + private ActionListener lastListener; + + @Override + public void allocate(RoutingAllocation allocation, ActionListener listener) { + lastListener = listener; + super.allocate(allocation, listener); + } + + @Override + protected void reconcile(DesiredBalance desiredBalance, RoutingAllocation allocation) { + fail("should not call reconcile"); 
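+                // This test allocates against an empty routing table, and the stubbed DesiredBalanceComputer
+                // above returns an empty desired balance, so the allocator is expected to hand off to
+                // submitReconcileTask (overridden below) rather than reconcile inline; landing here would
+                // mean an eager reconciliation on an empty cluster, which is what this test guards against.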
+ } + + @Override + protected void submitReconcileTask(DesiredBalance desiredBalance) { + assertThat(desiredBalance.lastConvergedIndex(), equalTo(0L)); + reconciliationTaskSubmitted.set(true); + lastListener.onResponse(null); + } + }; + assertThat(desiredBalanceShardsAllocator.getDesiredBalance(), sameInstance(DesiredBalance.INITIAL)); + try { + final PlainActionFuture future = new PlainActionFuture<>(); + desiredBalanceShardsAllocator.allocate( + new RoutingAllocation( + new AllocationDeciders(Collections.emptyList()), + clusterService.state(), + null, + null, + randomNonNegativeLong() + ), + future + ); + safeGet(future); + assertThat(desiredBalanceShardsAllocator.getStats().computationSubmitted(), equalTo(1L)); + assertThat(desiredBalanceShardsAllocator.getStats().computationExecuted(), equalTo(1L)); + assertThat(reconciliationTaskSubmitted.get(), is(true)); + assertThat(desiredBalanceShardsAllocator.getDesiredBalance().lastConvergedIndex(), equalTo(0L)); + } finally { + clusterService.close(); + terminate(threadPool); + } + } + private static IndexMetadata createIndex(String name) { return IndexMetadata.builder(name).settings(indexSettings(IndexVersion.current(), 1, 0)).build(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java deleted file mode 100644 index 9eec8309bbb83..0000000000000 --- a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java +++ /dev/null @@ -1,313 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ - -package org.elasticsearch.cluster.service; - -import org.elasticsearch.TransportVersion; -import org.elasticsearch.TransportVersions; -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.client.internal.ClusterAdminClient; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNodeUtils; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.service.TransportVersionsFixupListener.NodeTransportVersionTask; -import org.elasticsearch.cluster.version.CompatibilityVersions; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.Maps; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.indices.SystemIndexDescriptor; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.Scheduler; -import org.mockito.ArgumentCaptor; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.Executor; - -import static java.util.Map.entry; -import static org.elasticsearch.test.LambdaMatchers.transformedMatch; -import static org.hamcrest.Matchers.arrayContainingInAnyOrder; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.everyItem; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.same; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.hamcrest.MockitoHamcrest.argThat; - -public class TransportVersionsFixupListenerTests extends ESTestCase { - - private static final Version NEXT_VERSION = Version.V_8_8_1; - private static final TransportVersion NEXT_TRANSPORT_VERSION = TransportVersion.fromId(NEXT_VERSION.id); - - @SuppressWarnings("unchecked") - private static MasterServiceTaskQueue newMockTaskQueue() { - return mock(MasterServiceTaskQueue.class); - } - - private static DiscoveryNodes node(Version... versions) { - var builder = DiscoveryNodes.builder(); - for (int i = 0; i < versions.length; i++) { - builder.add(DiscoveryNodeUtils.create("node" + i, new TransportAddress(TransportAddress.META_ADDRESS, 9200 + i), versions[i])); - } - builder.localNodeId("node0").masterNodeId("node0"); - return builder.build(); - } - - @SafeVarargs - private static Map versions(T... 
versions) { - Map tvs = new HashMap<>(); - for (int i = 0; i < versions.length; i++) { - tvs.put("node" + i, versions[i]); - } - return tvs; - } - - private static NodesInfoResponse getResponse(Map responseData) { - return new NodesInfoResponse( - ClusterName.DEFAULT, - responseData.entrySet() - .stream() - .map( - e -> new NodeInfo( - "", - e.getValue(), - null, - null, - null, - DiscoveryNodeUtils.create(e.getKey(), new TransportAddress(TransportAddress.META_ADDRESS, 9200)), - null, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ) - .toList(), - List.of() - ); - } - - public void testNothingFixedWhenNothingToInfer() { - MasterServiceTaskQueue taskQueue = newMockTaskQueue(); - ClusterAdminClient client = mock(ClusterAdminClient.class); - - ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) - .nodes(node(Version.V_8_8_0)) - .nodeIdsToCompatibilityVersions(versions(new CompatibilityVersions(TransportVersions.V_8_8_0, Map.of()))) - .build(); - - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( - taskQueue, - client, - new FeatureService(List.of(new TransportFeatures())), - null, - null - ); - listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); - - verify(taskQueue, never()).submitTask(anyString(), any(), any()); - } - - public void testNothingFixedWhenOnNextVersion() { - MasterServiceTaskQueue taskQueue = newMockTaskQueue(); - ClusterAdminClient client = mock(ClusterAdminClient.class); - - ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) - .nodes(node(NEXT_VERSION)) - .nodeIdsToCompatibilityVersions(versions(new CompatibilityVersions(NEXT_TRANSPORT_VERSION, Map.of()))) - .build(); - - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( - taskQueue, - client, - new FeatureService(List.of(new TransportFeatures())), - null, - null - ); - listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); - - verify(taskQueue, never()).submitTask(anyString(), any(), any()); - } - - public void testNothingFixedWhenOnPreviousVersion() { - MasterServiceTaskQueue taskQueue = newMockTaskQueue(); - ClusterAdminClient client = mock(ClusterAdminClient.class); - - ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) - .nodes(node(Version.V_8_7_0, Version.V_8_8_0)) - .nodeIdsToCompatibilityVersions( - Maps.transformValues( - versions(TransportVersions.V_8_7_0, TransportVersions.V_8_8_0), - transportVersion -> new CompatibilityVersions(transportVersion, Map.of()) - ) - ) - .build(); - - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( - taskQueue, - client, - new FeatureService(List.of(new TransportFeatures())), - null, - null - ); - listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); - - verify(taskQueue, never()).submitTask(anyString(), any(), any()); - } - - @SuppressWarnings("unchecked") - public void testVersionsAreFixed() { - MasterServiceTaskQueue taskQueue = newMockTaskQueue(); - ClusterAdminClient client = mock(ClusterAdminClient.class); - - ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) - .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) - .nodeIdsToCompatibilityVersions( - Maps.transformValues( - versions(NEXT_TRANSPORT_VERSION, TransportVersions.V_8_8_0, TransportVersions.V_8_8_0), - transportVersion -> new CompatibilityVersions(transportVersion, Map.of()) - 
) - ) - .build(); - - ArgumentCaptor> action = ArgumentCaptor.forClass(ActionListener.class); - ArgumentCaptor task = ArgumentCaptor.forClass(NodeTransportVersionTask.class); - - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( - taskQueue, - client, - new FeatureService(List.of(new TransportFeatures())), - null, - null - ); - listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); - verify(client).nodesInfo( - argThat(transformedMatch(NodesInfoRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), - action.capture() - ); - action.getValue() - .onResponse( - getResponse( - Map.ofEntries( - entry("node1", new CompatibilityVersions(NEXT_TRANSPORT_VERSION, Map.of())), - entry("node2", new CompatibilityVersions(NEXT_TRANSPORT_VERSION, Map.of())) - ) - ) - ); - verify(taskQueue).submitTask(anyString(), task.capture(), any()); - - assertThat(task.getValue().results().keySet(), equalTo(Set.of("node1", "node2"))); - assertThat(task.getValue().results().values(), everyItem(equalTo(NEXT_TRANSPORT_VERSION))); - } - - public void testConcurrentChangesDoNotOverlap() { - MasterServiceTaskQueue taskQueue = newMockTaskQueue(); - ClusterAdminClient client = mock(ClusterAdminClient.class); - - ClusterState testState1 = ClusterState.builder(ClusterState.EMPTY_STATE) - .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) - .nodeIdsToCompatibilityVersions( - Maps.transformValues( - versions(NEXT_TRANSPORT_VERSION, TransportVersions.V_8_8_0, TransportVersions.V_8_8_0), - transportVersion -> new CompatibilityVersions(transportVersion, Map.of()) - ) - ) - .build(); - - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( - taskQueue, - client, - new FeatureService(List.of(new TransportFeatures())), - null, - null - ); - listeners.clusterChanged(new ClusterChangedEvent("test", testState1, ClusterState.EMPTY_STATE)); - verify(client).nodesInfo(argThat(transformedMatch(NodesInfoRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), any()); - // don't send back the response yet - - ClusterState testState2 = ClusterState.builder(ClusterState.EMPTY_STATE) - .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) - .nodeIdsToCompatibilityVersions( - Maps.transformValues( - versions(NEXT_TRANSPORT_VERSION, NEXT_TRANSPORT_VERSION, TransportVersions.V_8_8_0), - transportVersion -> new CompatibilityVersions(transportVersion, Map.of()) - ) - ) - .build(); - // should not send any requests - listeners.clusterChanged(new ClusterChangedEvent("test", testState2, testState1)); - verifyNoMoreInteractions(client); - } - - @SuppressWarnings("unchecked") - public void testFailedRequestsAreRetried() { - MasterServiceTaskQueue taskQueue = newMockTaskQueue(); - ClusterAdminClient client = mock(ClusterAdminClient.class); - Scheduler scheduler = mock(Scheduler.class); - Executor executor = mock(Executor.class); - - var compatibilityVersions = new CompatibilityVersions( - TransportVersion.current(), - Map.of(".system-index-1", new SystemIndexDescriptor.MappingsVersion(1, 1234)) - ); - ClusterState testState1 = ClusterState.builder(ClusterState.EMPTY_STATE) - .nodes(node(Version.CURRENT, Version.CURRENT, Version.CURRENT)) - .nodeIdsToCompatibilityVersions( - Map.ofEntries( - entry("node0", compatibilityVersions), - entry("node1", new CompatibilityVersions(TransportVersions.V_8_8_0, Map.of())), - entry("node2", new CompatibilityVersions(TransportVersions.V_8_8_0, Map.of())) - ) - ) - .build(); - - ArgumentCaptor> action = 
ArgumentCaptor.forClass(ActionListener.class); - ArgumentCaptor retry = ArgumentCaptor.forClass(Runnable.class); - - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener( - taskQueue, - client, - new FeatureService(List.of(new TransportFeatures())), - scheduler, - executor - ); - listeners.clusterChanged(new ClusterChangedEvent("test", testState1, ClusterState.EMPTY_STATE)); - verify(client, times(1)).nodesInfo(any(), action.capture()); - // do response immediately - action.getValue().onFailure(new RuntimeException("failure")); - verify(scheduler).schedule(retry.capture(), any(), same(executor)); - - // running retry should cause another check - retry.getValue().run(); - verify(client, times(2)).nodesInfo( - argThat(transformedMatch(NodesInfoRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), - any() - ); - } -} diff --git a/server/src/test/java/org/elasticsearch/common/TimeBasedUUIDGeneratorTests.java b/server/src/test/java/org/elasticsearch/common/TimeBasedUUIDGeneratorTests.java new file mode 100644 index 0000000000000..964683a1972ba --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/TimeBasedUUIDGeneratorTests.java @@ -0,0 +1,270 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.common; + +import org.elasticsearch.test.ESTestCase; + +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.Base64; +import java.util.HashSet; +import java.util.Set; +import java.util.function.Supplier; +import java.util.stream.IntStream; + +public class TimeBasedUUIDGeneratorTests extends ESTestCase { + + public void testTimeBasedUUIDGeneration() { + assertUUIDFormat(createGenerator(() -> Instant.now().toEpochMilli(), () -> 0, new TestRandomMacAddressSupplier()), 100_000); + } + + public void testTimeBasedUUIDUniqueness() { + assertUUIDUniqueness(createGenerator(() -> Instant.now().toEpochMilli(), () -> 0, new TestRandomMacAddressSupplier()), 100_000); + } + + public void testTimeBasedUUIDSequenceOverflow() { + // The assumption here is that our system will not generate more than 1000 UUIDs within the same millisecond. + // The sequence ID is set close to its max value (0x00FF_FFFF) to quickly trigger an overflow. + // However, since we are generating only 1000 UUIDs, the timestamp is expected to change at least once, + // ensuring uniqueness even if the sequence ID wraps around. + assertEquals( + 1000, + generateUUIDs( + createGenerator(() -> Instant.now().toEpochMilli(), () -> 0x00FF_FFFF - 10, new TestRandomMacAddressSupplier()), + 1000 + ).size() + ); + } + + public void testTimeBasedUUIDClockReset() { + // Simulate a clock that resets itself after reaching a threshold. 
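+        // With these parameters the clock advances 1 ms per call until it crosses the reset threshold
+        // (start + 100 ms), then jumps back 50 ms and climbs again, so the generator repeatedly observes
+        // timestamps that move backwards in time.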
+ final Supplier unreliableClock = new TestClockResetTimestampSupplier( + Instant.now(), + 1, + 50, + ChronoUnit.MILLIS, + Instant.now().plus(100, ChronoUnit.MILLIS) + ); + final UUIDGenerator generator = createGenerator(unreliableClock, () -> 0, new TestRandomMacAddressSupplier()); + + final Set beforeReset = generateUUIDs(generator, 5_000); + final Set afterReset = generateUUIDs(generator, 5_000); + + // Ensure all UUIDs are unique, even after the clock resets. + assertEquals(5_000, beforeReset.size()); + assertEquals(5_000, afterReset.size()); + beforeReset.addAll(afterReset); + assertEquals(10_000, beforeReset.size()); + } + + public void testKOrderedUUIDGeneration() { + assertUUIDFormat(createKOrderedGenerator(() -> Instant.now().toEpochMilli(), () -> 0, new TestRandomMacAddressSupplier()), 100_000); + } + + public void testKOrderedUUIDUniqueness() { + assertUUIDUniqueness( + createKOrderedGenerator(() -> Instant.now().toEpochMilli(), () -> 0, new TestRandomMacAddressSupplier()), + 100_000 + ); + } + + public void testKOrderedUUIDSequenceOverflow() { + final UUIDGenerator generator = createKOrderedGenerator( + () -> Instant.now().toEpochMilli(), + () -> 0x00FF_FFFF - 10, + new TestRandomMacAddressSupplier() + ); + final Set uuids = generateUUIDs(generator, 1000); + + // The assumption here is that our system will not generate more than 1000 UUIDs within the same millisecond. + // The sequence ID is set close to its max value (0x00FF_FFFF) to quickly trigger an overflow. + // However, since we are generating only 1000 UUIDs, the timestamp is expected to change at least once, + // ensuring uniqueness even if the sequence ID wraps around. + assertEquals(1000, uuids.size()); + } + + public void testUUIDEncodingDecoding() { + testUUIDEncodingDecodingHelper( + Instant.parse("2024-11-13T10:12:43Z").toEpochMilli(), + 12345, + new TestRandomMacAddressSupplier().get() + ); + } + + public void testUUIDEncodingDecodingWithRandomValues() { + testUUIDEncodingDecodingHelper( + randomInstantBetween(Instant.now().minus(1, ChronoUnit.DAYS), Instant.now()).toEpochMilli(), + randomIntBetween(0, 0x00FF_FFFF), + new TestRandomMacAddressSupplier().get() + ); + } + + private void testUUIDEncodingDecodingHelper(final long timestamp, final int sequenceId, final byte[] macAddress) { + final TestTimeBasedKOrderedUUIDDecoder decoder = new TestTimeBasedKOrderedUUIDDecoder( + createKOrderedGenerator(() -> timestamp, () -> sequenceId, () -> macAddress).getBase64UUID() + ); + + // The sequence ID is incremented by 1 when generating the UUID. + assertEquals("Sequence ID does not match", sequenceId + 1, decoder.decodeSequenceId()); + // Truncate the timestamp to milliseconds to match the UUID generation granularity. 
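+        // The decoder (see TestTimeBasedKOrderedUUIDDecoder below) reassembles the timestamp from the six
+        // bytes the k-ordered layout spreads across the UUID, so only millisecond precision is recoverable.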
+        assertEquals(
+            "Timestamp does not match",
+            Instant.ofEpochMilli(timestamp).truncatedTo(ChronoUnit.MILLIS),
+            Instant.ofEpochMilli(decoder.decodeTimestamp()).truncatedTo(ChronoUnit.MILLIS)
+        );
+        assertArrayEquals("MAC address does not match", macAddress, decoder.decodeMacAddress());
+    }
+
+    private void assertUUIDUniqueness(final UUIDGenerator generator, final int count) {
+        assertEquals(count, generateUUIDs(generator, count).size());
+    }
+
+    private Set<String> generateUUIDs(final UUIDGenerator generator, final int count) {
+        return IntStream.range(0, count).mapToObj(i -> generator.getBase64UUID()).collect(HashSet::new, Set::add, Set::addAll);
+    }
+
+    private void assertUUIDFormat(final UUIDGenerator generator, final int count) {
+        IntStream.range(0, count).forEach(i -> {
+            final String uuid = generator.getBase64UUID();
+            assertNotNull(uuid);
+            assertEquals(20, uuid.length());
+            assertFalse(uuid.contains("+"));
+            assertFalse(uuid.contains("/"));
+            assertFalse(uuid.contains("="));
+        });
+    }
+
+    private UUIDGenerator createGenerator(
+        final Supplier<Long> timestampSupplier,
+        final Supplier<Integer> sequenceIdSupplier,
+        final Supplier<byte[]> macAddressSupplier
+    ) {
+        return new TimeBasedUUIDGenerator(timestampSupplier, sequenceIdSupplier, macAddressSupplier);
+    }
+
+    private UUIDGenerator createKOrderedGenerator(
+        final Supplier<Long> timestampSupplier,
+        final Supplier<Integer> sequenceIdSupplier,
+        final Supplier<byte[]> macAddressSupplier
+    ) {
+        return new TimeBasedKOrderedUUIDGenerator(timestampSupplier, sequenceIdSupplier, macAddressSupplier);
+    }
+
+    private static class TestRandomMacAddressSupplier implements Supplier<byte[]> {
+        private final byte[] macAddress = new byte[] { randomByte(), randomByte(), randomByte(), randomByte(), randomByte(), randomByte() };
+
+        @Override
+        public byte[] get() {
+            return macAddress;
+        }
+    }
+
+    /**
+     * A {@link Supplier} implementation that simulates a clock that can move forward or backward in time.
+     * This supplier provides timestamps in milliseconds since the epoch, adjusting based on a given delta
+     * until a reset threshold is reached. After crossing the threshold, the timestamp moves backwards by a reset delta.
+     */
+    private static class TestClockResetTimestampSupplier implements Supplier<Long> {
+        private Instant currentTime;
+        private final long delta;
+        private final long resetDelta;
+        private final ChronoUnit unit;
+        private final Instant resetThreshold;
+
+        /**
+         * Constructs a new {@link TestClockResetTimestampSupplier}.
+         *
+         * @param startTime      The initial starting time.
+         * @param delta          The amount of time to add to the current time in each forward step.
+         * @param resetDelta     The amount of time to subtract once the reset threshold is reached.
+         * @param unit           The unit of time for both delta and resetDelta.
+         * @param resetThreshold The threshold after which the time is reset backwards.
+         */
+        TestClockResetTimestampSupplier(
+            final Instant startTime,
+            final long delta,
+            final long resetDelta,
+            final ChronoUnit unit,
+            final Instant resetThreshold
+        ) {
+            this.currentTime = startTime;
+            this.delta = delta;
+            this.resetDelta = resetDelta;
+            this.unit = unit;
+            this.resetThreshold = resetThreshold;
+        }
+
+        /**
+         * Provides the next timestamp in milliseconds since the epoch.
+         * If the current time is before the reset threshold, it advances the time by the delta.
+         * Otherwise, it subtracts the reset delta.
+         *
+         * @return The current time in milliseconds since the epoch.
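+         *         Note that the adjustment happens before the read, so the first value returned is
+         *         {@code startTime + delta} (or {@code startTime - resetDelta} if the start time is
+         *         already at or past the threshold), never {@code startTime} itself.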
+ */ + @Override + public Long get() { + if (currentTime.isBefore(resetThreshold)) { + currentTime = currentTime.plus(delta, unit); + } else { + currentTime = currentTime.minus(resetDelta, unit); + } + return currentTime.toEpochMilli(); + } + } + + /** + * A utility class to decode the K-ordered UUID extracting the original timestamp, MAC address and sequence ID. + */ + private static class TestTimeBasedKOrderedUUIDDecoder { + + private final byte[] decodedBytes; + + /** + * Constructs a new {@link TestTimeBasedKOrderedUUIDDecoder} using a base64-encoded UUID string. + * + * @param base64UUID The base64-encoded UUID string to decode. + */ + TestTimeBasedKOrderedUUIDDecoder(final String base64UUID) { + this.decodedBytes = Base64.getUrlDecoder().decode(base64UUID); + } + + /** + * Decodes the timestamp from the UUID using the following bytes: + * 0 (most significant), 1, 2, 3, 11, 13 (least significant). + * + * @return The decoded timestamp in milliseconds. + */ + public long decodeTimestamp() { + return ((long) (decodedBytes[0] & 0xFF) << 40) | ((long) (decodedBytes[1] & 0xFF) << 32) | ((long) (decodedBytes[2] & 0xFF) + << 24) | ((long) (decodedBytes[3] & 0xFF) << 16) | ((long) (decodedBytes[11] & 0xFF) << 8) | (decodedBytes[13] & 0xFF); + } + + /** + * Decodes the MAC address from the UUID using bytes 4 to 9. + * + * @return The decoded MAC address as a byte array. + */ + public byte[] decodeMacAddress() { + byte[] macAddress = new byte[6]; + System.arraycopy(decodedBytes, 4, macAddress, 0, 6); + return macAddress; + } + + /** + * Decodes the sequence ID from the UUID using bytes: + * 10 (most significant), 12 (middle), 14 (least significant). + * + * @return The decoded sequence ID. + */ + public int decodeSequenceId() { + return ((decodedBytes[10] & 0xFF) << 16) | ((decodedBytes[12] & 0xFF) << 8) | (decodedBytes[14] & 0xFF); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/common/UUIDTests.java b/server/src/test/java/org/elasticsearch/common/UUIDTests.java index 9fbeaf1c6c081..71c705f5df511 100644 --- a/server/src/test/java/org/elasticsearch/common/UUIDTests.java +++ b/server/src/test/java/org/elasticsearch/common/UUIDTests.java @@ -27,26 +27,37 @@ import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; +import java.util.Base64; import java.util.HashSet; import java.util.Random; import java.util.Set; +import java.util.function.Supplier; public class UUIDTests extends ESTestCase { - static UUIDGenerator timeUUIDGen = new TimeBasedUUIDGenerator(); + static final Base64.Decoder BASE_64_URL_DECODER = Base64.getUrlDecoder(); + static UUIDGenerator timeUUIDGen = new TimeBasedUUIDGenerator( + UUIDs.DEFAULT_TIMESTAMP_SUPPLIER, + UUIDs.DEFAULT_SEQUENCE_ID_SUPPLIER, + UUIDs.DEFAULT_MAC_ADDRESS_SUPPLIER + ); static UUIDGenerator randomUUIDGen = new RandomBasedUUIDGenerator(); - static UUIDGenerator kOrderedUUIDGen = new TimeBasedKOrderedUUIDGenerator(); + static UUIDGenerator kOrderedUUIDGen = new TimeBasedKOrderedUUIDGenerator( + UUIDs.DEFAULT_TIMESTAMP_SUPPLIER, + UUIDs.DEFAULT_SEQUENCE_ID_SUPPLIER, + UUIDs.DEFAULT_MAC_ADDRESS_SUPPLIER + ); public void testRandomUUID() { - verifyUUIDSet(100000, randomUUIDGen); + verifyUUIDSet(100000, randomUUIDGen).forEach(this::verifyUUIDIsUrlSafe); } public void testTimeUUID() { - verifyUUIDSet(100000, timeUUIDGen); + verifyUUIDSet(100000, timeUUIDGen).forEach(this::verifyUUIDIsUrlSafe); } public void testKOrderedUUID() { - verifyUUIDSet(100000, kOrderedUUIDGen); + verifyUUIDSet(100000, 
kOrderedUUIDGen).forEach(this::verifyUUIDIsUrlSafe); } public void testThreadedRandomUUID() { @@ -143,6 +154,7 @@ public void testUUIDThreaded(UUIDGenerator uuidSource) { globalSet.addAll(runner.uuidSet); } assertEquals(count * uuids, globalSet.size()); + globalSet.forEach(this::verifyUUIDIsUrlSafe); } private static double testCompression(final UUIDGenerator generator, int numDocs, int numDocsPerSecond, int numNodes, Logger logger) @@ -158,35 +170,25 @@ private static double testCompression(final UUIDGenerator generator, int numDocs UUIDGenerator uuidSource = generator; if (generator instanceof TimeBasedUUIDGenerator) { if (generator instanceof TimeBasedKOrderedUUIDGenerator) { - uuidSource = new TimeBasedKOrderedUUIDGenerator() { + uuidSource = new TimeBasedKOrderedUUIDGenerator(new Supplier<>() { double currentTimeMillis = TestUtil.nextLong(random(), 0L, 10000000000L); @Override - protected long currentTimeMillis() { + public Long get() { currentTimeMillis += intervalBetweenDocs * 2 * r.nextDouble(); return (long) currentTimeMillis; } - - @Override - protected byte[] macAddress() { - return RandomPicks.randomFrom(r, macAddresses); - } - }; + }, () -> 0, () -> RandomPicks.randomFrom(r, macAddresses)); } else { - uuidSource = new TimeBasedUUIDGenerator() { + uuidSource = new TimeBasedUUIDGenerator(new Supplier<>() { double currentTimeMillis = TestUtil.nextLong(random(), 0L, 10000000000L); @Override - protected long currentTimeMillis() { + public Long get() { currentTimeMillis += intervalBetweenDocs * 2 * r.nextDouble(); return (long) currentTimeMillis; } - - @Override - protected byte[] macAddress() { - return RandomPicks.randomFrom(r, macAddresses); - } - }; + }, () -> 0, () -> RandomPicks.randomFrom(r, macAddresses)); } } @@ -237,4 +239,13 @@ public void testStringLength() { private static int getUnpaddedBase64StringLength(int sizeInBytes) { return (int) Math.ceil(sizeInBytes * 4.0 / 3.0); } + + private void verifyUUIDIsUrlSafe(final String uuid) { + assertFalse("UUID should not contain padding characters: " + uuid, uuid.contains("=")); + try { + BASE_64_URL_DECODER.decode(uuid); + } catch (IllegalArgumentException e) { + throw new AssertionError("UUID is not a valid Base64 URL-safe encoded string: " + uuid); + } + } } diff --git a/server/src/test/java/org/elasticsearch/common/bytes/BytesArrayTests.java b/server/src/test/java/org/elasticsearch/common/bytes/BytesArrayTests.java index ad298e7aa8307..e067be6b1b0da 100644 --- a/server/src/test/java/org/elasticsearch/common/bytes/BytesArrayTests.java +++ b/server/src/test/java/org/elasticsearch/common/bytes/BytesArrayTests.java @@ -107,4 +107,5 @@ public void testGetDoubleLE() { Exception e = expectThrows(ArrayIndexOutOfBoundsException.class, () -> ref.getDoubleLE(9)); assertThat(e.getMessage(), equalTo("Index 9 out of bounds for length 9")); } + } diff --git a/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java b/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java index e103704c89649..874a6a96313e4 100644 --- a/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java +++ b/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java @@ -9,15 +9,9 @@ package org.elasticsearch.features; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNodeUtils; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.index.IndexVersion; 
-import org.elasticsearch.index.IndexVersions;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.VersionUtils;

 import java.util.List;
 import java.util.Map;
@@ -30,79 +24,36 @@ public class FeatureServiceTests extends ESTestCase {

     private static class TestFeatureSpecification implements FeatureSpecification {
         private final Set<NodeFeature> features;
-        private final Map<NodeFeature, Version> historicalFeatures;

-        private TestFeatureSpecification(Set<NodeFeature> features, Map<NodeFeature, Version> historicalFeatures) {
+        private TestFeatureSpecification(Set<NodeFeature> features) {
             this.features = features;
-            this.historicalFeatures = historicalFeatures;
         }

         @Override
         public Set<NodeFeature> getFeatures() {
             return features;
         }
-
-        @Override
-        public Map<NodeFeature, Version> getHistoricalFeatures() {
-            return historicalFeatures;
-        }
     }

     public void testFailsDuplicateFeatures() {
         // these all need to be separate classes to trigger the exception
-        FeatureSpecification fs1 = new TestFeatureSpecification(Set.of(new NodeFeature("f1")), Map.of()) {
-        };
-        FeatureSpecification fs2 = new TestFeatureSpecification(Set.of(new NodeFeature("f1")), Map.of()) {
-        };
-        FeatureSpecification hfs1 = new TestFeatureSpecification(Set.of(), Map.of(new NodeFeature("f1"), Version.V_8_11_0)) {
+        FeatureSpecification fs1 = new TestFeatureSpecification(Set.of(new NodeFeature("f1"))) {
         };
-        FeatureSpecification hfs2 = new TestFeatureSpecification(Set.of(), Map.of(new NodeFeature("f1"), Version.V_8_11_0)) {
+        FeatureSpecification fs2 = new TestFeatureSpecification(Set.of(new NodeFeature("f1"))) {
         };

         assertThat(
             expectThrows(IllegalArgumentException.class, () -> new FeatureService(List.of(fs1, fs2))).getMessage(),
             containsString("Duplicate feature")
         );
-        assertThat(
-            expectThrows(IllegalArgumentException.class, () -> new FeatureService(List.of(hfs1, hfs2))).getMessage(),
-            containsString("Duplicate feature")
-        );
-        assertThat(
-            expectThrows(IllegalArgumentException.class, () -> new FeatureService(List.of(fs1, hfs1))).getMessage(),
-            containsString("Duplicate feature")
-        );
-    }
-
-    public void testFailsNonHistoricalVersion() {
-        FeatureSpecification fs = new TestFeatureSpecification(
-            Set.of(),
-            Map.of(new NodeFeature("f1"), Version.fromId(FeatureService.CLUSTER_FEATURES_ADDED_VERSION.id + 1))
-        );
-
-        assertThat(
-            expectThrows(IllegalArgumentException.class, () -> new FeatureService(List.of(fs))).getMessage(),
-            containsString("not a historical version")
-        );
-    }
-
-    public void testFailsSameRegularAndHistoricalFeature() {
-        FeatureSpecification fs = new TestFeatureSpecification(
-            Set.of(new NodeFeature("f1")),
-            Map.of(new NodeFeature("f1"), Version.V_8_12_0)
-        );
-
-        assertThat(
-            expectThrows(IllegalArgumentException.class, () -> new FeatureService(List.of(fs))).getMessage(),
-            containsString("cannot be declared as both a regular and historical feature")
-        );
     }

     public void testGetNodeFeaturesCombinesAllSpecs() {
         List<FeatureSpecification> specs = List.of(
-            new TestFeatureSpecification(Set.of(new NodeFeature("f1"), new NodeFeature("f2")), Map.of()),
-            new TestFeatureSpecification(Set.of(new NodeFeature("f3")), Map.of()),
-            new TestFeatureSpecification(Set.of(new NodeFeature("f4"), new NodeFeature("f5")), Map.of()),
-            new TestFeatureSpecification(Set.of(), Map.of())
+            new TestFeatureSpecification(Set.of(new NodeFeature("f1"), new NodeFeature("f2"))),
+            new TestFeatureSpecification(Set.of(new NodeFeature("f3"))),
+            new TestFeatureSpecification(Set.of(new NodeFeature("f4"), new NodeFeature("f5"))),
+            new TestFeatureSpecification(Set.of())
         );

         FeatureService service = new FeatureService(specs);
@@ -111,10 +62,10 @@ public void
testGetNodeFeaturesCombinesAllSpecs() { public void testStateHasFeatures() { List specs = List.of( - new TestFeatureSpecification(Set.of(new NodeFeature("f1"), new NodeFeature("f2")), Map.of()), - new TestFeatureSpecification(Set.of(new NodeFeature("f3")), Map.of()), - new TestFeatureSpecification(Set.of(new NodeFeature("f4"), new NodeFeature("f5")), Map.of()), - new TestFeatureSpecification(Set.of(), Map.of()) + new TestFeatureSpecification(Set.of(new NodeFeature("f1"), new NodeFeature("f2"))), + new TestFeatureSpecification(Set.of(new NodeFeature("f3"))), + new TestFeatureSpecification(Set.of(new NodeFeature("f4"), new NodeFeature("f5"))), + new TestFeatureSpecification(Set.of()) ); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) @@ -130,50 +81,4 @@ public void testStateHasFeatures() { assertFalse(service.clusterHasFeature(state, new NodeFeature("nf2"))); assertFalse(service.clusterHasFeature(state, new NodeFeature("nf3"))); } - - private static ClusterState stateWithMinVersion(Version version) { - DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); - nodes.add(DiscoveryNodeUtils.builder("node").version(version, IndexVersions.ZERO, IndexVersion.current()).build()); - for (int n = randomInt(5); n >= 0; n--) { - nodes.add( - DiscoveryNodeUtils.builder("node" + n) - .version( - VersionUtils.randomVersionBetween(random(), version, Version.CURRENT), - IndexVersions.ZERO, - IndexVersion.current() - ) - .build() - ); - } - - return ClusterState.builder(ClusterName.DEFAULT).nodes(nodes).build(); - } - - public void testStateHasHistoricalFeatures() { - NodeFeature v8_11_0 = new NodeFeature("hf_8.11.0"); - NodeFeature v8_10_0 = new NodeFeature("hf_8.10.0"); - NodeFeature v7_17_0 = new NodeFeature("hf_7.17.0"); - List specs = List.of( - new TestFeatureSpecification(Set.of(), Map.of(v8_11_0, Version.V_8_11_0)), - new TestFeatureSpecification(Set.of(), Map.of(v8_10_0, Version.V_8_10_0)), - new TestFeatureSpecification(Set.of(), Map.of(v7_17_0, Version.V_7_17_0)) - ); - - FeatureService service = new FeatureService(specs); - assertTrue(service.clusterHasFeature(stateWithMinVersion(Version.V_8_11_0), v8_11_0)); - assertTrue(service.clusterHasFeature(stateWithMinVersion(Version.V_8_11_0), v8_10_0)); - assertTrue(service.clusterHasFeature(stateWithMinVersion(Version.V_8_11_0), v7_17_0)); - - assertFalse(service.clusterHasFeature(stateWithMinVersion(Version.V_8_10_0), v8_11_0)); - assertTrue(service.clusterHasFeature(stateWithMinVersion(Version.V_8_10_0), v8_10_0)); - assertTrue(service.clusterHasFeature(stateWithMinVersion(Version.V_8_10_0), v7_17_0)); - - assertFalse(service.clusterHasFeature(stateWithMinVersion(Version.V_7_17_0), v8_11_0)); - assertFalse(service.clusterHasFeature(stateWithMinVersion(Version.V_7_17_0), v8_10_0)); - assertTrue(service.clusterHasFeature(stateWithMinVersion(Version.V_7_17_0), v7_17_0)); - - assertFalse(service.clusterHasFeature(stateWithMinVersion(Version.V_7_16_0), v8_11_0)); - assertFalse(service.clusterHasFeature(stateWithMinVersion(Version.V_7_16_0), v8_10_0)); - assertFalse(service.clusterHasFeature(stateWithMinVersion(Version.V_7_16_0), v7_17_0)); - } } diff --git a/server/src/test/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceTests.java index 6713042002fa3..07aa9af3b4030 100644 --- a/server/src/test/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceTests.java +++ 
b/server/src/test/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.features.FeatureService; import org.elasticsearch.health.Diagnosis; -import org.elasticsearch.health.HealthFeatures; import org.elasticsearch.health.HealthIndicatorDetails; import org.elasticsearch.health.HealthIndicatorImpact; import org.elasticsearch.health.HealthIndicatorResult; @@ -1085,12 +1084,8 @@ static ClusterState createClusterState( Collection nodes, Map> indexNameToNodeIdsMap ) { - Map> features = new HashMap<>(); DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); - for (DiscoveryNode node : nodes) { - nodesBuilder = nodesBuilder.add(node); - features.put(node.getId(), Set.of(HealthFeatures.SUPPORTS_HEALTH.id())); - } + nodes.forEach(nodesBuilder::add); nodesBuilder.localNodeId(randomFrom(nodes).getId()); nodesBuilder.masterNodeId(randomFrom(nodes).getId()); ClusterBlocks.Builder clusterBlocksBuilder = new ClusterBlocks.Builder(); @@ -1125,7 +1120,6 @@ static ClusterState createClusterState( state.metadata(metadata.generateClusterUuidIfNeeded().build()); state.routingTable(routingTable.build()); state.blocks(clusterBlocksBuilder); - state.nodeFeatures(features); return state.build(); } diff --git a/server/src/test/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorServiceTests.java index 7a578650b7cbd..15ef2e150761f 100644 --- a/server/src/test/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/health/node/ShardsCapacityHealthIndicatorServiceTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.features.FeatureService; -import org.elasticsearch.health.HealthFeatures; import org.elasticsearch.health.HealthIndicatorDetails; import org.elasticsearch.health.HealthStatus; import org.elasticsearch.health.metadata.HealthMetadata; @@ -451,11 +450,7 @@ private ClusterState createClusterState( metadata.put(idxMetadata); } - var features = Set.of(HealthFeatures.SUPPORTS_SHARDS_CAPACITY_INDICATOR.id()); - return ClusterState.builder(clusterState) - .metadata(metadata) - .nodeFeatures(Map.of(dataNode.getId(), features, frozenNode.getId(), features)) - .build(); + return ClusterState.builder(clusterState).metadata(metadata).build(); } private static IndexMetadata.Builder createIndexInDataNode(int shards) { diff --git a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java index 19d92568e6528..fa774c0bcfd12 100644 --- a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java @@ -271,7 +271,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th final RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).withHeaders(restHeaders).build(); final RestControllerTests.AssertingChannel channel = new RestControllerTests.AssertingChannel( fakeRequest, - true, + randomBoolean(), RestStatus.BAD_REQUEST ); @@ -361,7 +361,11 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th Map> 
restHeaders = new HashMap<>(); restHeaders.put(Task.TRACE_PARENT_HTTP_HEADER, Collections.singletonList(traceParentValue)); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).withHeaders(restHeaders).build(); - RestControllerTests.AssertingChannel channel = new RestControllerTests.AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); + RestControllerTests.AssertingChannel channel = new RestControllerTests.AssertingChannel( + fakeRequest, + randomBoolean(), + RestStatus.BAD_REQUEST + ); try ( AbstractHttpServerTransport transport = new AbstractHttpServerTransport( diff --git a/server/src/test/java/org/elasticsearch/http/TestHttpRequest.java b/server/src/test/java/org/elasticsearch/http/TestHttpRequest.java index 8cd61453a3391..27dc0be673abb 100644 --- a/server/src/test/java/org/elasticsearch/http/TestHttpRequest.java +++ b/server/src/test/java/org/elasticsearch/http/TestHttpRequest.java @@ -85,11 +85,6 @@ public HttpResponse createResponse(RestStatus status, ChunkedRestResponseBodyPar @Override public void release() {} - @Override - public HttpRequest releaseAndCopy() { - return this; - } - @Override public Exception getInboundException() { return null; diff --git a/server/src/test/java/org/elasticsearch/index/IndexingPressureTests.java b/server/src/test/java/org/elasticsearch/index/IndexingPressureTests.java index b4130120372a1..8da7ada91856d 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexingPressureTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexingPressureTests.java @@ -37,6 +37,31 @@ public void testMemoryLimitSettingsFallbackToOldSingleLimitSetting() { assertThat(IndexingPressure.MAX_REPLICA_BYTES.get(settings), Matchers.equalTo(ByteSizeValue.ofKb(30))); } + public void testHighAndLowWatermarkSplits() { + IndexingPressure indexingPressure = new IndexingPressure(settings); + + try ( + Releasable ignored1 = indexingPressure.markCoordinatingOperationStarted(10, ByteSizeValue.ofKb(6).getBytes(), false); + Releasable ignored2 = indexingPressure.markCoordinatingOperationStarted(10, ByteSizeValue.ofKb(2).getBytes(), false) + ) { + assertFalse(indexingPressure.shouldSplitBulk(randomIntBetween(1, 1000))); + assertEquals(indexingPressure.stats().getHighWaterMarkSplits(), 0L); + assertEquals(indexingPressure.stats().getLowWaterMarkSplits(), 0L); + assertTrue(indexingPressure.shouldSplitBulk(randomIntBetween(1025, 10000))); + assertEquals(indexingPressure.stats().getHighWaterMarkSplits(), 0L); + assertEquals(indexingPressure.stats().getLowWaterMarkSplits(), 1L); + + try (Releasable ignored3 = indexingPressure.markPrimaryOperationStarted(10, ByteSizeValue.ofKb(1).getBytes(), false)) { + assertFalse(indexingPressure.shouldSplitBulk(randomIntBetween(1, 127))); + assertEquals(indexingPressure.stats().getHighWaterMarkSplits(), 0L); + assertEquals(indexingPressure.stats().getLowWaterMarkSplits(), 1L); + assertTrue(indexingPressure.shouldSplitBulk(randomIntBetween(129, 1000))); + assertEquals(indexingPressure.stats().getHighWaterMarkSplits(), 1L); + assertEquals(indexingPressure.stats().getLowWaterMarkSplits(), 1L); + } + } + } + public void testHighAndLowWatermarkSettings() { IndexingPressure indexingPressure = new IndexingPressure(settings); diff --git a/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java b/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java index d7d5c886e0741..997cb123dbf8e 100644 --- 
a/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java +++ b/server/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTests.java @@ -48,7 +48,6 @@ import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.cluster.node.DiscoveryNode.STATELESS_ENABLED_SETTING_NAME; -import static org.elasticsearch.index.IndexSettings.INDEX_FAST_REFRESH_SETTING; import static org.elasticsearch.index.cache.bitset.BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -94,17 +93,7 @@ public void testInvalidateEntries() throws Exception { DirectoryReader reader = DirectoryReader.open(writer); reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId("test", "_na_", 0)); - BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) { - - } - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) { - - } - }); + BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, BitsetFilterCache.Listener.NOOP); BitSetProducer filter = cache.getBitSetProducer(new TermQuery(new Term("field", "value"))); assertThat(matchCount(filter, reader), equalTo(3)); @@ -237,17 +226,7 @@ public void testSetNullListener() { } public void testRejectOtherIndex() throws IOException { - BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) { - - } - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) { - - } - }); + BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, BitsetFilterCache.Listener.NOOP); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); @@ -273,35 +252,21 @@ public void testShouldLoadRandomAccessFiltersEagerly() { for (var hasIndexRole : values) { for (var loadFiltersEagerly : values) { for (var isStateless : values) { - for (var fastRefresh : values) { - if (isStateless == false && fastRefresh) { - // fast refresh is only relevant for stateless indices - continue; - } - - boolean result = BitsetFilterCache.shouldLoadRandomAccessFiltersEagerly( - bitsetFilterCacheSettings(isStateless, hasIndexRole, loadFiltersEagerly, fastRefresh) - ); - if (isStateless) { - assertEquals(loadFiltersEagerly && ((hasIndexRole && fastRefresh) || hasIndexRole == false), result); - } else { - assertEquals(loadFiltersEagerly, result); - } + boolean result = BitsetFilterCache.shouldLoadRandomAccessFiltersEagerly( + bitsetFilterCacheSettings(isStateless, hasIndexRole, loadFiltersEagerly) + ); + if (isStateless) { + assertEquals(loadFiltersEagerly && hasIndexRole == false, result); + } else { + assertEquals(loadFiltersEagerly, result); } } } } } - private IndexSettings bitsetFilterCacheSettings( - boolean isStateless, - boolean hasIndexRole, - boolean loadFiltersEagerly, - boolean fastRefresh - ) { - var indexSettingsBuilder = Settings.builder() - .put(INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING.getKey(), loadFiltersEagerly) - .put(INDEX_FAST_REFRESH_SETTING.getKey(), fastRefresh); + private IndexSettings bitsetFilterCacheSettings(boolean isStateless, boolean hasIndexRole, boolean loadFiltersEagerly) { + var indexSettingsBuilder = Settings.builder().put(INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING.getKey(), 
loadFiltersEagerly); var nodeSettingsBuilder = Settings.builder() .putList( diff --git a/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java b/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java index 9e4a19eb039fd..6b1ffc3693636 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java @@ -19,7 +19,6 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; -import org.apache.lucene.util.Accountable; import org.elasticsearch.TransportVersion; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; @@ -30,7 +29,6 @@ import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperRegistry; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.script.ScriptCompiler; @@ -132,13 +130,7 @@ private CodecService createCodecService() throws IOException { Collections.emptyMap(), MapperPlugin.NOOP_FIELD_FILTER ); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(settings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(settings, BitsetFilterCache.Listener.NOOP); MapperService service = new MapperService( () -> TransportVersion.current(), settings, diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java index df03426354864..db9fdead949de 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java @@ -75,7 +75,8 @@ public void testAddFields() throws Exception { merged, merged.toCompressedXContent(), IndexVersion.current(), - MapperMetrics.NOOP + MapperMetrics.NOOP, + "myIndex" ); assertThat(mergedMapper.mappers().getMapper("age"), notNullValue()); assertThat(mergedMapper.mappers().getMapper("obj1.prop1"), notNullValue()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index 71b52dc41705b..09d57d0e34c3c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -2625,7 +2625,8 @@ same name need to be part of the same mappings (hence the same document). 
If th newMapping, newMapping.toCompressedXContent(), IndexVersion.current(), - MapperMetrics.NOOP + MapperMetrics.NOOP, + "myIndex" ); ParsedDocument doc2 = newDocMapper.parse(source(""" { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java index 399740e6200e6..d4d0e67ff4141 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java @@ -69,7 +69,7 @@ public void testCreateDynamicStringFieldAsKeywordForDimension() throws IOExcepti XContentParser parser = createParser(JsonXContent.jsonXContent, source); SourceToParse sourceToParse = new SourceToParse("test", new BytesArray(source), XContentType.JSON); - SourceFieldMapper sourceMapper = new SourceFieldMapper.Builder(null, Settings.EMPTY, false).setSynthetic().build(); + SourceFieldMapper sourceMapper = new SourceFieldMapper.Builder(null, Settings.EMPTY, false, false).setSynthetic().build(); RootObjectMapper root = new RootObjectMapper.Builder("_doc", Optional.empty()).add( new PassThroughObjectMapper.Builder("labels").setPriority(0).setContainsDimensions().dynamic(ObjectMapper.Dynamic.TRUE) ).build(MapperBuilderContext.root(false, false)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java index 0bf4c36d70a90..e0f58b8922be2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.index.mapper; -import org.apache.lucene.util.Accountable; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.bytes.BytesReference; @@ -20,7 +19,6 @@ import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.script.ScriptService; @@ -47,13 +45,7 @@ private static MappingParser createMappingParser(Settings settings, IndexVersion IndexAnalyzers indexAnalyzers = createIndexAnalyzers(); SimilarityService similarityService = new SimilarityService(indexSettings, scriptService, Collections.emptyMap()); MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); Supplier mappingParserContextSupplier = () -> new MappingParserContext( similarityService::getSimilarity, type -> mapperRegistry.getMapperParser(type, indexSettings.getIndexVersionCreated()), diff --git a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java index df6d9380fd141..4d6a30849e263 100644 --- 
a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java @@ -52,7 +52,8 @@ protected void registerParameters(ParameterChecker checker) throws IOException { checker.registerConflictCheck( "enabled", topMapping(b -> b.startObject(SourceFieldMapper.NAME).field("enabled", false).endObject()), - topMapping(b -> b.startObject(SourceFieldMapper.NAME).field("enabled", true).endObject()) + topMapping(b -> b.startObject(SourceFieldMapper.NAME).field("enabled", true).endObject()), + dm -> {} ); checker.registerUpdateCheck( topMapping(b -> b.startObject(SourceFieldMapper.NAME).field("enabled", true).endObject()), @@ -62,14 +63,17 @@ protected void registerParameters(ParameterChecker checker) throws IOException { checker.registerUpdateCheck( topMapping(b -> b.startObject(SourceFieldMapper.NAME).field("mode", "stored").endObject()), topMapping(b -> b.startObject(SourceFieldMapper.NAME).field("mode", "synthetic").endObject()), - dm -> assertTrue(dm.metadataMapper(SourceFieldMapper.class).isSynthetic()) + dm -> { + assertTrue(dm.metadataMapper(SourceFieldMapper.class).isSynthetic()); + } ); checker.registerConflictCheck("includes", b -> b.array("includes", "foo*")); checker.registerConflictCheck("excludes", b -> b.array("excludes", "foo*")); checker.registerConflictCheck( "mode", topMapping(b -> b.startObject(SourceFieldMapper.NAME).field("mode", "synthetic").endObject()), - topMapping(b -> b.startObject(SourceFieldMapper.NAME).field("mode", "stored").endObject()) + topMapping(b -> b.startObject(SourceFieldMapper.NAME).field("mode", "stored").endObject()), + d -> {} ); } @@ -212,7 +216,6 @@ public void testSyntheticUpdates() throws Exception { MapperService mapperService = createMapperService(""" { "_doc" : { "_source" : { "mode" : "synthetic" } } } """); - SourceFieldMapper mapper = mapperService.documentMapper().sourceMapper(); assertTrue(mapper.enabled()); assertTrue(mapper.isSynthetic()); @@ -230,11 +233,13 @@ public void testSyntheticUpdates() throws Exception { Exception e = expectThrows(IllegalArgumentException.class, () -> merge(mapperService, """ { "_doc" : { "_source" : { "mode" : "stored" } } } """)); + assertThat(e.getMessage(), containsString("Cannot update parameter [mode] from [synthetic] to [stored]")); merge(mapperService, """ { "_doc" : { "_source" : { "mode" : "disabled" } } } """); + mapper = mapperService.documentMapper().sourceMapper(); assertFalse(mapper.enabled()); assertFalse(mapper.isSynthetic()); @@ -247,14 +252,14 @@ public void testSyntheticSourceInTimeSeries() throws IOException { }); DocumentMapper mapper = createTimeSeriesModeDocumentMapper(mapping); assertTrue(mapper.sourceMapper().isSynthetic()); - assertEquals("{\"_source\":{\"mode\":\"synthetic\"}}", mapper.sourceMapper().toString()); + assertEquals("{\"_source\":{}}", mapper.sourceMapper().toString()); } public void testSyntheticSourceWithLogsIndexMode() throws IOException { XContentBuilder mapping = fieldMapping(b -> { b.field("type", "keyword"); }); DocumentMapper mapper = createLogsModeDocumentMapper(mapping); assertTrue(mapper.sourceMapper().isSynthetic()); - assertEquals("{\"_source\":{\"mode\":\"synthetic\"}}", mapper.sourceMapper().toString()); + assertEquals("{\"_source\":{}}", mapper.sourceMapper().toString()); } public void testSupportsNonDefaultParameterValues() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/MultiDenseVectorScriptDocValuesTests.java 
b/server/src/test/java/org/elasticsearch/index/mapper/vectors/MultiDenseVectorScriptDocValuesTests.java new file mode 100644 index 0000000000000..435baa477e740 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/MultiDenseVectorScriptDocValuesTests.java @@ -0,0 +1,374 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.index.mapper.vectors; + +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.ElementType; +import org.elasticsearch.script.field.vectors.ByteMultiDenseVectorDocValuesField; +import org.elasticsearch.script.field.vectors.FloatMultiDenseVectorDocValuesField; +import org.elasticsearch.script.field.vectors.MultiDenseVector; +import org.elasticsearch.script.field.vectors.MultiDenseVectorDocValuesField; +import org.elasticsearch.test.ESTestCase; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.Iterator; + +import static org.hamcrest.Matchers.containsString; + +public class MultiDenseVectorScriptDocValuesTests extends ESTestCase { + + @BeforeClass + public static void setup() { + assumeTrue("Requires multi-dense vector support", MultiDenseVectorFieldMapper.FEATURE_FLAG.isEnabled()); + } + + public void testFloatGetVectorValueAndGetMagnitude() throws IOException { + int dims = 3; + float[][][] vectors = { { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }, { { 1, 0, 2 } } }; + float[][] expectedMagnitudes = { { 1.7320f, 2.4495f, 3.3166f }, { 2.2361f } }; + + BinaryDocValues docValues = wrap(vectors, ElementType.FLOAT); + BinaryDocValues magnitudeValues = wrap(expectedMagnitudes); + MultiDenseVectorDocValuesField field = new FloatMultiDenseVectorDocValuesField( + docValues, + magnitudeValues, + "test", + ElementType.FLOAT, + dims + ); + MultiDenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); + for (int i = 0; i < vectors.length; i++) { + field.setNextDocId(i); + assertEquals(vectors[i].length, field.size()); + assertEquals(dims, scriptDocValues.dims()); + Iterator iterator = scriptDocValues.getVectorValues(); + float[] magnitudes = scriptDocValues.getMagnitudes(); + assertEquals(expectedMagnitudes[i].length, magnitudes.length); + for (int j = 0; j < vectors[i].length; j++) { + assertTrue(iterator.hasNext()); + assertArrayEquals(vectors[i][j], iterator.next(), 0.0001f); + assertEquals(expectedMagnitudes[i][j], magnitudes[j], 0.0001f); + } + } + } + + public void testByteGetVectorValueAndGetMagnitude() throws IOException { + int dims = 3; + float[][][] vectors = { { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }, { { 1, 0, 2 } } }; + float[][] expectedMagnitudes = { { 1.7320f, 2.4495f, 3.3166f }, { 2.2361f } }; + + BinaryDocValues docValues = wrap(vectors, ElementType.BYTE); + BinaryDocValues magnitudeValues = wrap(expectedMagnitudes); + MultiDenseVectorDocValuesField field = new ByteMultiDenseVectorDocValuesField( + docValues, + magnitudeValues, + "test", + 
ElementType.BYTE, + dims + ); + MultiDenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); + for (int i = 0; i < vectors.length; i++) { + field.setNextDocId(i); + assertEquals(vectors[i].length, field.size()); + assertEquals(dims, scriptDocValues.dims()); + Iterator iterator = scriptDocValues.getVectorValues(); + float[] magnitudes = scriptDocValues.getMagnitudes(); + assertEquals(expectedMagnitudes[i].length, magnitudes.length); + for (int j = 0; j < vectors[i].length; j++) { + assertTrue(iterator.hasNext()); + assertArrayEquals(vectors[i][j], iterator.next(), 0.0001f); + assertEquals(expectedMagnitudes[i][j], magnitudes[j], 0.0001f); + } + } + } + + public void testFloatMetadataAndIterator() throws IOException { + int dims = 3; + float[][][] vectors = new float[][][] { fill(new float[3][dims], ElementType.FLOAT), fill(new float[2][dims], ElementType.FLOAT) }; + float[][] magnitudes = new float[][] { new float[3], new float[2] }; + BinaryDocValues docValues = wrap(vectors, ElementType.FLOAT); + BinaryDocValues magnitudeValues = wrap(magnitudes); + + MultiDenseVectorDocValuesField field = new FloatMultiDenseVectorDocValuesField( + docValues, + magnitudeValues, + "test", + ElementType.FLOAT, + dims + ); + for (int i = 0; i < vectors.length; i++) { + field.setNextDocId(i); + MultiDenseVector dv = field.get(); + assertEquals(vectors[i].length, dv.size()); + assertFalse(dv.isEmpty()); + assertEquals(dims, dv.getDims()); + UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, field::iterator); + assertEquals("Cannot iterate over single valued multi_dense_vector field, use get() instead", e.getMessage()); + } + field.setNextDocId(vectors.length); + MultiDenseVector dv = field.get(); + assertEquals(dv, MultiDenseVector.EMPTY); + } + + public void testByteMetadataAndIterator() throws IOException { + int dims = 3; + float[][][] vectors = new float[][][] { fill(new float[3][dims], ElementType.BYTE), fill(new float[2][dims], ElementType.BYTE) }; + float[][] magnitudes = new float[][] { new float[3], new float[2] }; + BinaryDocValues docValues = wrap(vectors, ElementType.BYTE); + BinaryDocValues magnitudeValues = wrap(magnitudes); + MultiDenseVectorDocValuesField field = new ByteMultiDenseVectorDocValuesField( + docValues, + magnitudeValues, + "test", + ElementType.BYTE, + dims + ); + for (int i = 0; i < vectors.length; i++) { + field.setNextDocId(i); + MultiDenseVector dv = field.get(); + assertEquals(vectors[i].length, dv.size()); + assertFalse(dv.isEmpty()); + assertEquals(dims, dv.getDims()); + UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, field::iterator); + assertEquals("Cannot iterate over single valued multi_dense_vector field, use get() instead", e.getMessage()); + } + field.setNextDocId(vectors.length); + MultiDenseVector dv = field.get(); + assertEquals(dv, MultiDenseVector.EMPTY); + } + + protected float[][] fill(float[][] vectors, ElementType elementType) { + for (float[] vector : vectors) { + for (int i = 0; i < vector.length; i++) { + vector[i] = elementType == ElementType.FLOAT ? 
randomFloat() : randomByte(); + } + } + return vectors; + } + + public void testFloatMissingValues() throws IOException { + int dims = 3; + float[][][] vectors = { { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }, { { 1, 0, 2 } } }; + float[][] magnitudes = { { 1.7320f, 2.4495f, 3.3166f }, { 2.2361f } }; + BinaryDocValues docValues = wrap(vectors, ElementType.FLOAT); + BinaryDocValues magnitudeValues = wrap(magnitudes); + MultiDenseVectorDocValuesField field = new FloatMultiDenseVectorDocValuesField( + docValues, + magnitudeValues, + "test", + ElementType.FLOAT, + dims + ); + MultiDenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); + + field.setNextDocId(3); + assertEquals(0, field.size()); + Exception e = expectThrows(IllegalArgumentException.class, scriptDocValues::getVectorValues); + assertEquals("A document doesn't have a value for a multi-vector field!", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, scriptDocValues::getMagnitudes); + assertEquals("A document doesn't have a value for a multi-vector field!", e.getMessage()); + } + + public void testByteMissingValues() throws IOException { + int dims = 3; + float[][][] vectors = { { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }, { { 1, 0, 2 } } }; + float[][] magnitudes = { { 1.7320f, 2.4495f, 3.3166f }, { 2.2361f } }; + BinaryDocValues docValues = wrap(vectors, ElementType.BYTE); + BinaryDocValues magnitudeValues = wrap(magnitudes); + MultiDenseVectorDocValuesField field = new ByteMultiDenseVectorDocValuesField( + docValues, + magnitudeValues, + "test", + ElementType.BYTE, + dims + ); + MultiDenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); + + field.setNextDocId(3); + assertEquals(0, field.size()); + Exception e = expectThrows(IllegalArgumentException.class, scriptDocValues::getVectorValues); + assertEquals("A document doesn't have a value for a multi-vector field!", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, scriptDocValues::getMagnitudes); + assertEquals("A document doesn't have a value for a multi-vector field!", e.getMessage()); + } + + public void testFloatGetFunctionIsNotAccessible() throws IOException { + int dims = 3; + float[][][] vectors = { { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }, { { 1, 0, 2 } } }; + float[][] magnitudes = { { 1.7320f, 2.4495f, 3.3166f }, { 2.2361f } }; + BinaryDocValues docValues = wrap(vectors, ElementType.FLOAT); + BinaryDocValues magnitudeValues = wrap(magnitudes); + MultiDenseVectorDocValuesField field = new FloatMultiDenseVectorDocValuesField( + docValues, + magnitudeValues, + "test", + ElementType.FLOAT, + dims + ); + MultiDenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); + + field.setNextDocId(0); + Exception e = expectThrows(UnsupportedOperationException.class, () -> scriptDocValues.get(0)); + assertThat( + e.getMessage(), + containsString( + "accessing a multi-vector field's value through 'get' or 'value' is not supported," + + " use 'vectorValues' or 'magnitudes' instead." 
+ ) + ); + } + + public void testByteGetFunctionIsNotAccessible() throws IOException { + int dims = 3; + float[][][] vectors = { { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }, { { 1, 0, 2 } } }; + float[][] magnitudes = { { 1.7320f, 2.4495f, 3.3166f }, { 2.2361f } }; + BinaryDocValues docValues = wrap(vectors, ElementType.BYTE); + BinaryDocValues magnitudeValues = wrap(magnitudes); + MultiDenseVectorDocValuesField field = new ByteMultiDenseVectorDocValuesField( + docValues, + magnitudeValues, + "test", + ElementType.BYTE, + dims + ); + MultiDenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); + + field.setNextDocId(0); + Exception e = expectThrows(UnsupportedOperationException.class, () -> scriptDocValues.get(0)); + assertThat( + e.getMessage(), + containsString( + "accessing a multi-vector field's value through 'get' or 'value' is not supported," + + " use 'vectorValues' or 'magnitudes' instead." + ) + ); + } + + public static BinaryDocValues wrap(float[][] magnitudes) { + return new BinaryDocValues() { + int idx = -1; + int maxIdx = magnitudes.length; + + @Override + public BytesRef binaryValue() { + if (idx >= maxIdx) { + throw new IllegalStateException("max index exceeded"); + } + ByteBuffer magnitudeBuffer = ByteBuffer.allocate(magnitudes[idx].length * Float.BYTES).order(ByteOrder.LITTLE_ENDIAN); + for (float magnitude : magnitudes[idx]) { + magnitudeBuffer.putFloat(magnitude); + } + return new BytesRef(magnitudeBuffer.array()); + } + + @Override + public boolean advanceExact(int target) { + idx = target; + if (target < maxIdx) { + return true; + } + return false; + } + + @Override + public int docID() { + return idx; + } + + @Override + public int nextDoc() { + return idx++; + } + + @Override + public int advance(int target) { + throw new IllegalArgumentException("not defined!"); + } + + @Override + public long cost() { + throw new IllegalArgumentException("not defined!"); + } + }; + } + + public static BinaryDocValues wrap(float[][][] vectors, ElementType elementType) { + return new BinaryDocValues() { + int idx = -1; + int maxIdx = vectors.length; + + @Override + public BytesRef binaryValue() { + if (idx >= maxIdx) { + throw new IllegalStateException("max index exceeded"); + } + return mockEncodeDenseVector(vectors[idx], elementType, IndexVersion.current()); + } + + @Override + public boolean advanceExact(int target) { + idx = target; + if (target < maxIdx) { + return true; + } + return false; + } + + @Override + public int docID() { + return idx; + } + + @Override + public int nextDoc() { + return idx++; + } + + @Override + public int advance(int target) { + throw new IllegalArgumentException("not defined!"); + } + + @Override + public long cost() { + throw new IllegalArgumentException("not defined!"); + } + }; + } + + public static BytesRef mockEncodeDenseVector(float[][] values, ElementType elementType, IndexVersion indexVersion) { + int dims = values[0].length; + if (elementType == ElementType.BIT) { + dims *= Byte.SIZE; + } + int numBytes = elementType.getNumBytes(dims); + ByteBuffer byteBuffer = elementType.createByteBuffer(indexVersion, numBytes * values.length); + for (float[] vector : values) { + for (float value : vector) { + if (elementType == ElementType.FLOAT) { + byteBuffer.putFloat(value); + } else if (elementType == ElementType.BYTE || elementType == ElementType.BIT) { + byteBuffer.put((byte) value); + } else { + throw new IllegalStateException("unknown element_type [" + elementType + "]"); + } + } + } + return new BytesRef(byteBuffer.array()); + } + 
+} diff --git a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java index fdc18264e2299..dc70c44a89128 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java @@ -384,7 +384,7 @@ public void testSearchRequestRuntimeFieldsAndMultifieldDetection() { public void testSyntheticSourceSearchLookup() throws IOException { // Build a mapping using synthetic source - SourceFieldMapper sourceMapper = new SourceFieldMapper.Builder(null, Settings.EMPTY, false).setSynthetic().build(); + SourceFieldMapper sourceMapper = new SourceFieldMapper.Builder(null, Settings.EMPTY, false, false).setSynthetic().build(); RootObjectMapper root = new RootObjectMapper.Builder("_doc", Optional.empty()).add( new KeywordFieldMapper.Builder("cat", IndexVersion.current()).ignoreAbove(100) ).build(MapperBuilderContext.root(true, false)); diff --git a/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java b/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java index dabc8672733e2..0e8c7e0857251 100644 --- a/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java @@ -65,11 +65,12 @@ public void testReadStringProperty() { } public void testReadStringPropertyInvalidType() { - try { - ConfigurationUtils.readStringProperty(null, null, config, "arr"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), equalTo("[arr] property isn't a string, but of type [java.util.Arrays$ArrayList]")); - } + ElasticsearchParseException caught = assertThrows( + ElasticsearchParseException.class, + () -> ConfigurationUtils.readStringProperty(null, null, config, "arr") + ); + assertThat(caught.getMessage(), equalTo("[arr] property isn't a string, but of type [java.util.Arrays$ArrayList]")); + } public void testReadBooleanProperty() { @@ -83,11 +84,11 @@ public void testReadNullBooleanProperty() { } public void testReadBooleanPropertyInvalidType() { - try { - ConfigurationUtils.readBooleanProperty(null, null, config, "arr", true); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), equalTo("[arr] property isn't a boolean, but of type [java.util.Arrays$ArrayList]")); - } + ElasticsearchParseException caught = assertThrows( + ElasticsearchParseException.class, + () -> ConfigurationUtils.readBooleanProperty(null, null, config, "arr", true) + ); + assertThat(caught.getMessage(), equalTo("[arr] property isn't a boolean, but of type [java.util.Arrays$ArrayList]")); } public void testReadStringOrIntProperty() { @@ -98,11 +99,11 @@ public void testReadStringOrIntProperty() { } public void testReadStringOrIntPropertyInvalidType() { - try { - ConfigurationUtils.readStringOrIntProperty(null, null, config, "arr", null); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), equalTo("[arr] property isn't a string or int, but of type [java.util.Arrays$ArrayList]")); - } + ElasticsearchParseException caught = assertThrows( + ElasticsearchParseException.class, + () -> ConfigurationUtils.readStringOrIntProperty(null, null, config, "arr", null) + ); + assertThat(caught.getMessage(), equalTo("[arr] property isn't a string or int, but of type [java.util.Arrays$ArrayList]")); } public void testReadMediaProperty() { diff --git 
a/server/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java
index b62fff2eceb28..8235c66ef976b 100644
--- a/server/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java
+++ b/server/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java
@@ -56,8 +56,8 @@ public void testFromXContent() throws IOException {
         assertEquals(2, custom.getPipelines().size());
         assertEquals("1", custom.getPipelines().get("1").getId());
         assertEquals("2", custom.getPipelines().get("2").getId());
-        assertEquals(pipeline.getConfigAsMap(), custom.getPipelines().get("1").getConfigAsMap());
-        assertEquals(pipeline2.getConfigAsMap(), custom.getPipelines().get("2").getConfigAsMap());
+        assertEquals(pipeline.getConfig(), custom.getPipelines().get("1").getConfig());
+        assertEquals(pipeline2.getConfig(), custom.getPipelines().get("2").getConfig());
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineConfigurationTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineConfigurationTests.java
index 202c4edb2d0c8..7be6e97762ccf 100644
--- a/server/src/test/java/org/elasticsearch/ingest/PipelineConfigurationTests.java
+++ b/server/src/test/java/org/elasticsearch/ingest/PipelineConfigurationTests.java
@@ -26,26 +26,57 @@
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.function.Predicate;

+import static org.hamcrest.Matchers.anEmptyMap;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.sameInstance;

 public class PipelineConfigurationTests extends AbstractXContentTestCase<PipelineConfiguration> {

+    public void testConfigInvariants() {
+        Map<String, Object> original = Map.of("a", 1);
+        Map<String, Object> mutable = new HashMap<>(original);
+        PipelineConfiguration configuration = new PipelineConfiguration("1", mutable);
+        // the config is equal to the original & mutable map, regardless of how you get a reference to it
+        assertThat(configuration.getConfig(), equalTo(original));
+        assertThat(configuration.getConfig(), equalTo(mutable));
+        assertThat(configuration.getConfig(), equalTo(configuration.getConfig(false)));
+        assertThat(configuration.getConfig(), equalTo(configuration.getConfig(true)));
+        // the config is the same instance as itself when unmodifiable is true
+        assertThat(configuration.getConfig(), sameInstance(configuration.getConfig()));
+        assertThat(configuration.getConfig(), sameInstance(configuration.getConfig(true)));
+        // but it's not the same instance as the original mutable map, nor if unmodifiable is false
+        assertThat(configuration.getConfig(), not(sameInstance(mutable)));
+        assertThat(configuration.getConfig(), not(sameInstance(configuration.getConfig(false))));
+
+        // changing the mutable map doesn't alter the pipeline's configuration
+        mutable.put("b", 2);
+        assertThat(configuration.getConfig(), equalTo(original));
+
+        // the modifiable map can be modified
+        Map<String, Object> modifiable = configuration.getConfig(false);
+        modifiable.put("c", 3); // this doesn't throw an exception
+        assertThat(modifiable.get("c"), equalTo(3));
+        // but the next modifiable copy is a new fresh copy, and doesn't reflect those changes
+        assertThat(configuration.getConfig(), equalTo(configuration.getConfig(false)));
+    }
+
     public void testSerialization() throws IOException {
         PipelineConfiguration configuration = new PipelineConfiguration(
             "1",
             new BytesArray("{}".getBytes(StandardCharsets.UTF_8)),
             XContentType.JSON
); - assertEquals(XContentType.JSON, configuration.getXContentType()); - + assertThat(configuration.getConfig(), anEmptyMap()); BytesStreamOutput out = new BytesStreamOutput(); configuration.writeTo(out); StreamInput in = StreamInput.wrap(out.bytes().toBytesRef().bytes); PipelineConfiguration serialized = PipelineConfiguration.readFrom(in); - assertEquals(XContentType.JSON, serialized.getXContentType()); - assertEquals("{}", serialized.getConfig().utf8ToString()); + assertThat(serialized.getConfig(), anEmptyMap()); } public void testMetaSerialization() throws IOException { @@ -56,13 +87,14 @@ public void testMetaSerialization() throws IOException { new BytesArray(configJson.getBytes(StandardCharsets.UTF_8)), XContentType.JSON ); - assertEquals(XContentType.JSON, configuration.getXContentType()); BytesStreamOutput out = new BytesStreamOutput(); configuration.writeTo(out); StreamInput in = StreamInput.wrap(out.bytes().toBytesRef().bytes); PipelineConfiguration serialized = PipelineConfiguration.readFrom(in); - assertEquals(XContentType.JSON, serialized.getXContentType()); - assertEquals(configJson, serialized.getConfig().utf8ToString()); + assertEquals( + XContentHelper.convertToMap(new BytesArray(configJson.getBytes(StandardCharsets.UTF_8)), true, XContentType.JSON).v2(), + serialized.getConfig() + ); } public void testParser() throws IOException { @@ -80,9 +112,8 @@ public void testParser() throws IOException { XContentParser xContentParser = xContentType.xContent() .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, bytes.streamInput()); PipelineConfiguration parsed = parser.parse(xContentParser, null); - assertEquals(xContentType.canonical(), parsed.getXContentType()); - assertEquals("{}", XContentHelper.convertToJson(parsed.getConfig(), false, parsed.getXContentType())); - assertEquals("1", parsed.getId()); + assertThat(parsed.getId(), equalTo("1")); + assertThat(parsed.getConfig(), anEmptyMap()); } public void testGetVersion() { diff --git a/server/src/test/java/org/elasticsearch/monitor/jvm/HotThreadsTests.java b/server/src/test/java/org/elasticsearch/monitor/jvm/HotThreadsTests.java index 93c40185f62ac..37eb69c0ca409 100644 --- a/server/src/test/java/org/elasticsearch/monitor/jvm/HotThreadsTests.java +++ b/server/src/test/java/org/elasticsearch/monitor/jvm/HotThreadsTests.java @@ -947,7 +947,7 @@ private static String innerDetect( long currentThreadId ) throws Exception { try (var writer = new StringWriter()) { - hotThreads.innerDetect(mockedMthreadMXBeanBean, sunThreadInfo, currentThreadId, (interval) -> null, writer); + hotThreads.innerDetect(mockedMthreadMXBeanBean, sunThreadInfo, currentThreadId, writer, () -> {}); return writer.toString(); } } diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsLoaderTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsLoaderTests.java new file mode 100644 index 0000000000000..059cb15551acb --- /dev/null +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsLoaderTests.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.plugins; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class PluginsLoaderTests extends ESTestCase { + + public void testToModuleName() { + assertThat(PluginsLoader.toModuleName("module.name"), equalTo("module.name")); + assertThat(PluginsLoader.toModuleName("module-name"), equalTo("module.name")); + assertThat(PluginsLoader.toModuleName("module-name1"), equalTo("module.name1")); + assertThat(PluginsLoader.toModuleName("1module-name"), equalTo("module.name")); + assertThat(PluginsLoader.toModuleName("module-name!"), equalTo("module.name")); + assertThat(PluginsLoader.toModuleName("module!@#name!"), equalTo("module.name")); + assertThat(PluginsLoader.toModuleName("!module-name!"), equalTo("module.name")); + assertThat(PluginsLoader.toModuleName("module_name"), equalTo("module_name")); + assertThat(PluginsLoader.toModuleName("-module-name-"), equalTo("module.name")); + assertThat(PluginsLoader.toModuleName("_module_name"), equalTo("_module_name")); + assertThat(PluginsLoader.toModuleName("_"), equalTo("_")); + } +} diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index f927a12b50da3..79d8f98c7dca6 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -18,7 +18,6 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.IndexModule; -import org.elasticsearch.jdk.ModuleQualifiedExportsService; import org.elasticsearch.plugin.analysis.CharFilterFactory; import org.elasticsearch.plugins.scanners.PluginInfo; import org.elasticsearch.plugins.spi.BarPlugin; @@ -66,12 +65,11 @@ public class PluginsServiceTests extends ESTestCase { public static class FilterablePlugin extends Plugin implements ScriptPlugin {} static PluginsService newPluginsService(Settings settings) { - return new PluginsService(settings, null, null, TestEnvironment.newEnvironment(settings).pluginsFile()) { - @Override - protected void addServerExportsService(Map<String, List<ModuleQualifiedExportsService>> qualifiedExports) { - // tests don't run modular - } - }; + return new PluginsService( + settings, + null, + PluginsLoader.createPluginsLoader(null, TestEnvironment.newEnvironment(settings).pluginsFile(), false) + ); } static PluginsService newMockPluginsService(List<Class<? extends Plugin>> classpathPlugins) { @@ -466,7 +464,8 @@ public void testExtensiblePlugin() { List.of( new PluginsService.LoadedPlugin( new PluginDescriptor("extensible", null, null, null, null, classname, null, List.of(), false, false, false, false), - extensiblePlugin + extensiblePlugin, + null ) ) ); @@ -480,7 +479,8 @@ public void testExtensiblePlugin() { List.of( new PluginsService.LoadedPlugin( new PluginDescriptor("extensible", null, null, null, null, classname, null, List.of(), false, false, false, false), - extensiblePlugin + extensiblePlugin, + null ), new PluginsService.LoadedPlugin( new PluginDescriptor( @@ -497,7 +497,8 @@ public void testExtensiblePlugin() { false, false ), - testPlugin + testPlugin, + null ) ) ); @@ -875,20 +876,6 @@ public void testCanCreateAClassLoader() { assertEquals(this.getClass().getClassLoader(), loader.getParent()); } - public void testToModuleName() { - assertThat(PluginsService.toModuleName("module.name"), equalTo("module.name")); - assertThat(PluginsService.toModuleName("module-name"), equalTo("module.name")); -
assertThat(PluginsService.toModuleName("module-name1"), equalTo("module.name1")); - assertThat(PluginsService.toModuleName("1module-name"), equalTo("module.name")); - assertThat(PluginsService.toModuleName("module-name!"), equalTo("module.name")); - assertThat(PluginsService.toModuleName("module!@#name!"), equalTo("module.name")); - assertThat(PluginsService.toModuleName("!module-name!"), equalTo("module.name")); - assertThat(PluginsService.toModuleName("module_name"), equalTo("module_name")); - assertThat(PluginsService.toModuleName("-module-name-"), equalTo("module.name")); - assertThat(PluginsService.toModuleName("_module_name"), equalTo("_module_name")); - assertThat(PluginsService.toModuleName("_"), equalTo("_")); - } - static final class Loader extends ClassLoader { Loader(ClassLoader parent) { super(parent); @@ -896,22 +883,25 @@ static final class Loader extends ClassLoader { } // Closes the URLClassLoaders and UberModuleClassloaders of plugins loaded by the given plugin service. + // We can use the direct ClassLoader from the plugin because tests do not use any parent SPI ClassLoaders. static void closePluginLoaders(PluginsService pluginService) { for (var lp : pluginService.plugins()) { - if (lp.loader() instanceof URLClassLoader urlClassLoader) { + if (lp.classLoader() instanceof URLClassLoader urlClassLoader) { try { PrivilegedOperations.closeURLClassLoader(urlClassLoader); } catch (IOException unexpected) { throw new UncheckedIOException(unexpected); } - } - if (lp.loader() instanceof UberModuleClassLoader loader) { + } else if (lp.classLoader() instanceof UberModuleClassLoader loader) { try { PrivilegedOperations.closeURLClassLoader(loader.getInternalLoader()); } catch (Exception e) { throw new RuntimeException(e); } + } else { + throw new AssertionError("Cannot close unexpected classloader " + lp.classLoader()); } + } } diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesModuleTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesModuleTests.java index aa47f3c066f57..7f6885e7a977f 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesModuleTests.java @@ -65,14 +65,26 @@ public void setUp() throws Exception { } public void testCanRegisterTwoRepositoriesWithDifferentTypes() { - when(plugin1.getRepositories(eq(environment), eq(contentRegistry), eq(clusterService), - eq(MockBigArrays.NON_RECYCLING_INSTANCE), eq(recoverySettings), - any(RepositoriesMetrics.class))) - .thenReturn(Collections.singletonMap("type1", factory)); - when(plugin2.getRepositories(eq(environment), eq(contentRegistry), eq(clusterService), - eq(MockBigArrays.NON_RECYCLING_INSTANCE), eq(recoverySettings), - any(RepositoriesMetrics.class))) - .thenReturn(Collections.singletonMap("type2", factory)); + when( + plugin1.getRepositories( + eq(environment), + eq(contentRegistry), + eq(clusterService), + eq(MockBigArrays.NON_RECYCLING_INSTANCE), + eq(recoverySettings), + any(RepositoriesMetrics.class) + ) + ).thenReturn(Collections.singletonMap("type1", factory)); + when( + plugin2.getRepositories( + eq(environment), + eq(contentRegistry), + eq(clusterService), + eq(MockBigArrays.NON_RECYCLING_INSTANCE), + eq(recoverySettings), + any(RepositoriesMetrics.class) + ) + ).thenReturn(Collections.singletonMap("type2", factory)); // Would throw new RepositoriesModule( @@ -83,18 +95,32 @@ public void testCanRegisterTwoRepositoriesWithDifferentTypes() { 
mock(ClusterService.class), MockBigArrays.NON_RECYCLING_INSTANCE, contentRegistry, - recoverySettings, TelemetryProvider.NOOP); + recoverySettings, + TelemetryProvider.NOOP + ); } public void testCannotRegisterTwoRepositoriesWithSameTypes() { - when(plugin1.getRepositories(eq(environment), eq(contentRegistry), eq(clusterService), - eq(MockBigArrays.NON_RECYCLING_INSTANCE), eq(recoverySettings), - any(RepositoriesMetrics.class))) - .thenReturn(Collections.singletonMap("type1", factory)); - when(plugin2.getRepositories(eq(environment), eq(contentRegistry), eq(clusterService), - eq(MockBigArrays.NON_RECYCLING_INSTANCE), eq(recoverySettings), - any(RepositoriesMetrics.class))) - .thenReturn(Collections.singletonMap("type1", factory)); + when( + plugin1.getRepositories( + eq(environment), + eq(contentRegistry), + eq(clusterService), + eq(MockBigArrays.NON_RECYCLING_INSTANCE), + eq(recoverySettings), + any(RepositoriesMetrics.class) + ) + ).thenReturn(Collections.singletonMap("type1", factory)); + when( + plugin2.getRepositories( + eq(environment), + eq(contentRegistry), + eq(clusterService), + eq(MockBigArrays.NON_RECYCLING_INSTANCE), + eq(recoverySettings), + any(RepositoriesMetrics.class) + ) + ).thenReturn(Collections.singletonMap("type1", factory)); IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, @@ -106,7 +132,9 @@ public void testCannotRegisterTwoRepositoriesWithSameTypes() { clusterService, MockBigArrays.NON_RECYCLING_INSTANCE, contentRegistry, - recoverySettings, TelemetryProvider.NOOP) + recoverySettings, + TelemetryProvider.NOOP + ) ); assertEquals("Repository type [type1] is already registered", ex.getMessage()); @@ -130,17 +158,25 @@ public void testCannotRegisterTwoInternalRepositoriesWithSameTypes() { clusterService, MockBigArrays.NON_RECYCLING_INSTANCE, contentRegistry, - recoverySettings, TelemetryProvider.NOOP) + recoverySettings, + TelemetryProvider.NOOP + ) ); assertEquals("Internal repository type [type1] is already registered", ex.getMessage()); } public void testCannotRegisterNormalAndInternalRepositoriesWithSameTypes() { - when(plugin1.getRepositories(eq(environment), eq(contentRegistry), eq(clusterService), - eq(MockBigArrays.NON_RECYCLING_INSTANCE), eq(recoverySettings), - any(RepositoriesMetrics.class))) - .thenReturn(Collections.singletonMap("type1", factory)); + when( + plugin1.getRepositories( + eq(environment), + eq(contentRegistry), + eq(clusterService), + eq(MockBigArrays.NON_RECYCLING_INSTANCE), + eq(recoverySettings), + any(RepositoriesMetrics.class) + ) + ).thenReturn(Collections.singletonMap("type1", factory)); when(plugin2.getInternalRepositories(environment, contentRegistry, clusterService, recoverySettings)).thenReturn( Collections.singletonMap("type1", factory) ); @@ -155,7 +191,9 @@ public void testCannotRegisterNormalAndInternalRepositoriesWithSameTypes() { clusterService, MockBigArrays.NON_RECYCLING_INSTANCE, contentRegistry, - recoverySettings, TelemetryProvider.NOOP) + recoverySettings, + TelemetryProvider.NOOP + ) ); assertEquals("Internal repository type [type1] is already registered as a non-internal repository", ex.getMessage()); diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsHealthIndicatorServiceTests.java new file mode 100644 index 0000000000000..03d1adff42c4e --- /dev/null +++ 
b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsHealthIndicatorServiceTests.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.reservedstate.service; + +import org.elasticsearch.health.HealthIndicatorDetails; +import org.elasticsearch.health.HealthIndicatorResult; +import org.elasticsearch.health.SimpleHealthIndicatorDetails; +import org.elasticsearch.reservedstate.service.FileSettingsService.FileSettingsHealthIndicatorService; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.health.HealthStatus.GREEN; +import static org.elasticsearch.health.HealthStatus.YELLOW; +import static org.elasticsearch.reservedstate.service.FileSettingsService.FileSettingsHealthIndicatorService.FAILURE_SYMPTOM; +import static org.elasticsearch.reservedstate.service.FileSettingsService.FileSettingsHealthIndicatorService.NO_CHANGES_SYMPTOM; +import static org.elasticsearch.reservedstate.service.FileSettingsService.FileSettingsHealthIndicatorService.STALE_SETTINGS_IMPACT; +import static org.elasticsearch.reservedstate.service.FileSettingsService.FileSettingsHealthIndicatorService.SUCCESS_SYMPTOM; + +/** + * Here, we test {@link FileSettingsHealthIndicatorService} in isolation; + * we do not test that {@link FileSettingsService} uses it correctly. + */ +public class FileSettingsHealthIndicatorServiceTests extends ESTestCase { + + FileSettingsHealthIndicatorService healthIndicatorService; + + @Before + public void initialize() { + healthIndicatorService = new FileSettingsHealthIndicatorService(); + } + + public void testInitiallyGreen() { + assertEquals( + new HealthIndicatorResult("file_settings", GREEN, NO_CHANGES_SYMPTOM, HealthIndicatorDetails.EMPTY, List.of(), List.of()), + healthIndicatorService.calculate(false, null) + ); + } + + public void testGreenYellowYellowGreen() { + healthIndicatorService.changeOccurred(); + // This is a strange case: a change occurred, but neither success nor failure have been reported yet. + // While the change is still in progress, we don't change the status. 
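+ // the assertion below therefore still expects GREEN, but with SUCCESS_SYMPTOM rather than NO_CHANGES_SYMPTOM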
+ assertEquals( + new HealthIndicatorResult("file_settings", GREEN, SUCCESS_SYMPTOM, HealthIndicatorDetails.EMPTY, List.of(), List.of()), + healthIndicatorService.calculate(false, null) + ); + + healthIndicatorService.failureOccurred("whoopsie 1"); + assertEquals( + new HealthIndicatorResult( + "file_settings", + YELLOW, + FAILURE_SYMPTOM, + new SimpleHealthIndicatorDetails(Map.of("failure_streak", 1L, "most_recent_failure", "whoopsie 1")), + STALE_SETTINGS_IMPACT, + List.of() + ), + healthIndicatorService.calculate(false, null) + ); + + healthIndicatorService.failureOccurred("whoopsie #2"); + assertEquals( + new HealthIndicatorResult( + "file_settings", + YELLOW, + FAILURE_SYMPTOM, + new SimpleHealthIndicatorDetails(Map.of("failure_streak", 2L, "most_recent_failure", "whoopsie #2")), + STALE_SETTINGS_IMPACT, + List.of() + ), + healthIndicatorService.calculate(false, null) + ); + + healthIndicatorService.successOccurred(); + assertEquals( + new HealthIndicatorResult("file_settings", GREEN, SUCCESS_SYMPTOM, HealthIndicatorDetails.EMPTY, List.of(), List.of()), + healthIndicatorService.calculate(false, null) + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java index 0db29588c4298..ae60a21b6fc22 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NodeConnectionsService; +import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -33,6 +34,7 @@ import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.Environment; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; +import org.elasticsearch.reservedstate.service.FileSettingsService.FileSettingsHealthIndicatorService; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; @@ -78,6 +80,8 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.verifyNoMoreInteractions; public class FileSettingsServiceTests extends ESTestCase { private static final Logger logger = LogManager.getLogger(FileSettingsServiceTests.class); @@ -86,6 +90,7 @@ public class FileSettingsServiceTests extends ESTestCase { private ReservedClusterStateService controller; private ThreadPool threadpool; private FileSettingsService fileSettingsService; + private FileSettingsHealthIndicatorService healthIndicatorService; @Before public void setUp() throws Exception { @@ -131,7 +136,8 @@ public void setUp() throws Exception { List.of(new ReservedClusterSettingsAction(clusterSettings)) ) ); - fileSettingsService = spy(new FileSettingsService(clusterService, controller, env)); + healthIndicatorService = mock(FileSettingsHealthIndicatorService.class); + fileSettingsService = spy(new FileSettingsService(clusterService, controller, env, healthIndicatorService)); } @After @@ 
-162,6 +168,7 @@ public void testStartStop() { assertTrue(fileSettingsService.watching()); fileSettingsService.stop(); assertFalse(fileSettingsService.watching()); + verifyNoInteractions(healthIndicatorService); } public void testOperatorDirName() { @@ -208,6 +215,10 @@ public void testInitialFileError() throws Exception { verify(controller, times(1)).process(any(), any(XContentParser.class), eq(ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION), any()); // assert we never notified any listeners of successful application of file based settings assertFalse(settingsChanged.get()); + + verify(healthIndicatorService, times(1)).changeOccurred(); + verify(healthIndicatorService, times(1)).failureOccurred(argThat(s -> s.startsWith(IllegalStateException.class.getName()))); + verifyNoMoreInteractions(healthIndicatorService); } @SuppressWarnings("unchecked") @@ -232,6 +243,10 @@ public void testInitialFileWorks() throws Exception { verify(fileSettingsService, times(1)).processFileOnServiceStart(); verify(controller, times(1)).process(any(), any(XContentParser.class), eq(ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION), any()); + + verify(healthIndicatorService, times(1)).changeOccurred(); + verify(healthIndicatorService, times(1)).successOccurred(); + verifyNoMoreInteractions(healthIndicatorService); } @SuppressWarnings("unchecked") @@ -267,6 +282,10 @@ public void testProcessFileChanges() throws Exception { verify(fileSettingsService, times(1)).processFileChanges(); verify(controller, times(1)).process(any(), any(XContentParser.class), eq(ReservedStateVersionCheck.HIGHER_VERSION_ONLY), any()); + + verify(healthIndicatorService, times(2)).changeOccurred(); + verify(healthIndicatorService, times(2)).successOccurred(); + verifyNoMoreInteractions(healthIndicatorService); } @SuppressWarnings("unchecked") @@ -321,6 +340,11 @@ public void testInvalidJSON() throws Exception { // Note: the name "processFileOnServiceStart" is a bit misleading because it is not // referring to fileSettingsService.start(). Rather, it is referring to the initialization // of the watcher thread itself, which occurs asynchronously when clusterChanged is first called. 
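+ // in total: two changes (the initial settings file and the invalid JSON update), one success, and one failure reported with an IllegalArgumentException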
+ + verify(healthIndicatorService, times(2)).changeOccurred(); + verify(healthIndicatorService, times(1)).successOccurred(); + verify(healthIndicatorService, times(1)).failureOccurred(argThat(s -> s.startsWith(IllegalArgumentException.class.getName()))); + verifyNoMoreInteractions(healthIndicatorService); } private static void awaitOrBust(CyclicBarrier barrier) { @@ -373,6 +397,12 @@ public void testStopWorksInMiddleOfProcessing() throws Exception { fileSettingsService.close(); // let the deadlocked thread end, so we can cleanly exit the test deadThreadLatch.countDown(); + + verify(healthIndicatorService, times(1)).changeOccurred(); + verify(healthIndicatorService, times(1)).failureOccurred( + argThat(s -> s.startsWith(FailedToCommitClusterStateException.class.getName())) + ); + verifyNoMoreInteractions(healthIndicatorService); } public void testHandleSnapshotRestoreClearsMetadata() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java b/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java index 8a8bed9ca73db..9f82911ed121f 100644 --- a/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java @@ -73,7 +73,7 @@ public List<Route> routes() { params.put("consumed", randomAlphaOfLength(8)); params.put("unconsumed", randomAlphaOfLength(8)); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); - RestChannel channel = new FakeRestChannel(request, true, 1); + RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mockClient) @@ -108,7 +108,7 @@ public List<Route> routes() { params.put("unconsumed-first", randomAlphaOfLength(8)); params.put("unconsumed-second", randomAlphaOfLength(8)); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); - RestChannel channel = new FakeRestChannel(request, true, 1); + RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mockClient) @@ -155,7 +155,7 @@ public List<Route> routes() { params.put("very_close_to_parametre", randomAlphaOfLength(8)); params.put("very_far_from_every_consumed_parameter", randomAlphaOfLength(8)); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); - RestChannel channel = new FakeRestChannel(request, true, 1); + RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mockClient) @@ -206,7 +206,7 @@ public List<Route> routes() { params.put("consumed", randomAlphaOfLength(8)); params.put("response_param", randomAlphaOfLength(8)); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); - RestChannel channel = new FakeRestChannel(request, true, 1); + RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); handler.handleRequest(request, channel, mockClient); assertTrue(restChannelConsumer.executed); assertTrue(restChannelConsumer.closed); @@ -238,7 +238,7 @@ public List<Route> routes() { params.put("human", null); params.put("error_trace", randomFrom("true", "false", null)); RestRequest request = new
FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); - RestChannel channel = new FakeRestChannel(request, true, 1); + RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); handler.handleRequest(request, channel, mockClient); assertTrue(restChannelConsumer.executed); assertTrue(restChannelConsumer.closed); @@ -283,7 +283,7 @@ public List<Route> routes() { params.put("size", randomAlphaOfLength(8)); params.put("time", randomAlphaOfLength(8)); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); - RestChannel channel = new FakeRestChannel(request, true, 1); + RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); handler.handleRequest(request, channel, mockClient); assertTrue(restChannelConsumer.executed); assertTrue(restChannelConsumer.closed); @@ -314,7 +314,7 @@ public List<Route> routes() { new BytesArray(builder.toString()), XContentType.JSON ).build(); - final RestChannel channel = new FakeRestChannel(request, true, 1); + final RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); handler.handleRequest(request, channel, mockClient); assertTrue(restChannelConsumer.executed); assertTrue(restChannelConsumer.closed); @@ -341,7 +341,7 @@ public List<Route> routes() { }; final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).build(); - final RestChannel channel = new FakeRestChannel(request, true, 1); + final RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); handler.handleRequest(request, channel, mockClient); assertTrue(restChannelConsumer.executed); assertTrue(restChannelConsumer.closed); @@ -371,7 +371,7 @@ public List<Route> routes() { new BytesArray(builder.toString()), XContentType.JSON ).build(); - final RestChannel channel = new FakeRestChannel(request, true, 1); + final RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mockClient) diff --git a/server/src/test/java/org/elasticsearch/rest/ChunkedRestResponseBodyPartTests.java b/server/src/test/java/org/elasticsearch/rest/ChunkedRestResponseBodyPartTests.java index 907c16aad5fdc..eece90ed94cf9 100644 --- a/server/src/test/java/org/elasticsearch/rest/ChunkedRestResponseBodyPartTests.java +++ b/server/src/test/java/org/elasticsearch/rest/ChunkedRestResponseBodyPartTests.java @@ -56,7 +56,7 @@ public void testEncodesChunkedXContentCorrectly() throws IOException { ToXContent.EMPTY_PARAMS, new FakeRestChannel( new FakeRestRequest.Builder(xContentRegistry()).withContent(BytesArray.EMPTY, randomXContent.type()).build(), - true, + randomBoolean(), 1 ) ); diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index afdad1045b4de..2fdb3daa26da4 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -161,7 +161,7 @@ public void testApplyProductSpecificResponseHeaders() { final ThreadContext threadContext = client.threadPool().getThreadContext(); final RestController restController = new RestController(null, null, circuitBreakerService, usageService, telemetryProvider); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).build(); - AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); + AssertingChannel channel = 
new AssertingChannel(fakeRequest, randomBoolean(), RestStatus.BAD_REQUEST); restController.dispatchRequest(fakeRequest, channel, threadContext); // the rest controller relies on the caller to stash the context, so we should expect these values here as we didn't stash the // context in this test @@ -180,7 +180,7 @@ public void testRequestWithDisallowedMultiValuedHeader() { restHeaders.put("header.1", Collections.singletonList("boo")); restHeaders.put("header.2", List.of("foo", "bar")); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).withHeaders(restHeaders).build(); - AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); + AssertingChannel channel = new AssertingChannel(fakeRequest, randomBoolean(), RestStatus.BAD_REQUEST); restController.dispatchRequest(fakeRequest, channel, threadContext); assertTrue(channel.getSendResponseCalled()); } @@ -211,7 +211,7 @@ public String getName() { }); } }); - AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.OK); + AssertingChannel channel = new AssertingChannel(fakeRequest, randomBoolean(), RestStatus.OK); spyRestController.dispatchRequest(fakeRequest, channel, threadContext); verify(requestsCounter).incrementBy( eq(1L), @@ -235,7 +235,7 @@ public MethodHandlers next() { return null; } }); - AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); + AssertingChannel channel = new AssertingChannel(fakeRequest, randomBoolean(), RestStatus.BAD_REQUEST); spyRestController.dispatchRequest(fakeRequest, channel, threadContext); verify(requestsCounter).incrementBy(eq(1L), eq(Map.of(STATUS_CODE_KEY, 400))); } @@ -257,7 +257,7 @@ public MethodHandlers next() { } }); - AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); + AssertingChannel channel = new AssertingChannel(fakeRequest, randomBoolean(), RestStatus.BAD_REQUEST); spyRestController.dispatchRequest(fakeRequest, channel, threadContext); verify(requestsCounter).incrementBy(eq(1L), eq(Map.of(STATUS_CODE_KEY, 400))); } @@ -280,7 +280,7 @@ public String getName() { })); when(spyRestController.getAllHandlers(any(), eq(fakeRequest.rawPath()))).thenAnswer(x -> handlers.iterator()); - AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.METHOD_NOT_ALLOWED); + AssertingChannel channel = new AssertingChannel(fakeRequest, randomBoolean(), RestStatus.METHOD_NOT_ALLOWED); spyRestController.dispatchRequest(fakeRequest, channel, threadContext); verify(requestsCounter).incrementBy(eq(1L), eq(Map.of(STATUS_CODE_KEY, 405))); } @@ -290,7 +290,7 @@ public void testDispatchBadRequestEmitsMetric() { final RestController restController = new RestController(null, null, circuitBreakerService, usageService, telemetryProvider); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).build(); - AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); + AssertingChannel channel = new AssertingChannel(fakeRequest, randomBoolean(), RestStatus.BAD_REQUEST); restController.dispatchBadRequest(channel, threadContext, new Exception()); verify(requestsCounter).incrementBy(eq(1L), eq(Map.of(STATUS_CODE_KEY, 400))); } @@ -314,7 +314,7 @@ public MethodHandlers next() { return new MethodHandlers("/").addMethod(GET, RestApiVersion.current(), (request, channel, client) -> {}); } }); - AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); + AssertingChannel channel = new 
AssertingChannel(fakeRequest, randomBoolean(), RestStatus.BAD_REQUEST); restController.dispatchRequest(fakeRequest, channel, threadContext); verify(tracer).startTrace( eq(threadContext), @@ -340,7 +340,7 @@ public void testRequestWithDisallowedMultiValuedHeaderButSameValues() { new RestResponse(RestStatus.OK, RestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY) ) ); - AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.OK); + AssertingChannel channel = new AssertingChannel(fakeRequest, randomBoolean(), RestStatus.OK); restController.dispatchRequest(fakeRequest, channel, threadContext); assertTrue(channel.getSendResponseCalled()); } @@ -466,7 +466,7 @@ public void testRestInterceptor() throws Exception { ); restController.registerHandler(new Route(GET, "/wrapped"), handler); RestRequest request = testRestRequest("/wrapped", "{}", XContentType.JSON); - AssertingChannel channel = new AssertingChannel(request, true, RestStatus.BAD_REQUEST); + AssertingChannel channel = new AssertingChannel(request, randomBoolean(), RestStatus.BAD_REQUEST); restController.dispatchRequest(request, channel, client.threadPool().getThreadContext()); httpServerTransport.start(); assertThat(wrapperCalled.get(), is(true)); @@ -477,7 +477,7 @@ public void testDispatchRequestAddsAndFreesBytesOnSuccess() { int contentLength = BREAKER_LIMIT.bytesAsInt(); String content = randomAlphaOfLength((int) Math.round(contentLength / inFlightRequestsBreaker.getOverhead())); RestRequest request = testRestRequest("/", content, XContentType.JSON); - AssertingChannel channel = new AssertingChannel(request, true, RestStatus.OK); + AssertingChannel channel = new AssertingChannel(request, randomBoolean(), RestStatus.OK); restController.dispatchRequest(request, channel, client.threadPool().getThreadContext()); @@ -489,7 +489,7 @@ public void testDispatchRequestAddsAndFreesBytesOnError() { int contentLength = BREAKER_LIMIT.bytesAsInt(); String content = randomAlphaOfLength((int) Math.round(contentLength / inFlightRequestsBreaker.getOverhead())); RestRequest request = testRestRequest("/error", content, XContentType.JSON); - AssertingChannel channel = new AssertingChannel(request, true, RestStatus.BAD_REQUEST); + AssertingChannel channel = new AssertingChannel(request, randomBoolean(), RestStatus.BAD_REQUEST); restController.dispatchRequest(request, channel, client.threadPool().getThreadContext()); @@ -502,7 +502,7 @@ public void testDispatchRequestAddsAndFreesBytesOnlyOnceOnError() { String content = randomAlphaOfLength((int) Math.round(contentLength / inFlightRequestsBreaker.getOverhead())); // we will produce an error in the rest handler and one more when sending the error response RestRequest request = testRestRequest("/error", content, XContentType.JSON); - ExceptionThrowingChannel channel = new ExceptionThrowingChannel(request, true); + ExceptionThrowingChannel channel = new ExceptionThrowingChannel(request, randomBoolean()); restController.dispatchRequest(request, channel, client.threadPool().getThreadContext()); @@ -521,7 +521,7 @@ public void testDispatchRequestAddsAndFreesBytesOnlyOnceOnErrorDuringSend() { ); // we will produce an error in the rest handler and one more when sending the error response RestRequest request = testRestRequest("/foo", content, XContentType.JSON); - ExceptionThrowingChannel channel = new ExceptionThrowingChannel(request, true) { + ExceptionThrowingChannel channel = new ExceptionThrowingChannel(request, randomBoolean()) { @Override protected BytesStream newBytesOutput() { return new 
RecyclerBytesStreamOutput(recycler); @@ -538,7 +538,7 @@ public void testDispatchRequestLimitsBytes() { int contentLength = BREAKER_LIMIT.bytesAsInt() + 1; String content = randomAlphaOfLength((int) Math.round(contentLength / inFlightRequestsBreaker.getOverhead())); RestRequest request = testRestRequest("/", content, XContentType.JSON); - AssertingChannel channel = new AssertingChannel(request, true, RestStatus.TOO_MANY_REQUESTS); + AssertingChannel channel = new AssertingChannel(request, randomBoolean(), RestStatus.TOO_MANY_REQUESTS); restController.dispatchRequest(request, channel, client.threadPool().getThreadContext()); @@ -549,7 +549,7 @@ public void testDispatchRequestLimitsBytes() { public void testDispatchRequiresContentTypeForRequestsWithContent() { String content = randomAlphaOfLength((int) Math.round(BREAKER_LIMIT.getBytes() / inFlightRequestsBreaker.getOverhead())); RestRequest request = testRestRequest("/", content, null); - AssertingChannel channel = new AssertingChannel(request, true, RestStatus.NOT_ACCEPTABLE); + AssertingChannel channel = new AssertingChannel(request, randomBoolean(), RestStatus.NOT_ACCEPTABLE); restController = new RestController(null, null, circuitBreakerService, usageService, telemetryProvider); restController.registerHandler( new Route(GET, "/"), @@ -566,7 +566,7 @@ public void testDispatchDoesNotRequireContentTypeForRequestsWithoutContent() { if (randomBoolean()) { fakeRestRequest = new RestRequest(fakeRestRequest); } - AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.OK); + AssertingChannel channel = new AssertingChannel(fakeRestRequest, randomBoolean(), RestStatus.OK); assertFalse(channel.getSendResponseCalled()); restController.dispatchRequest(fakeRestRequest, channel, client.threadPool().getThreadContext()); @@ -582,7 +582,7 @@ public void testDispatchFailsWithPlainText() { if (randomBoolean()) { fakeRestRequest = new RestRequest(fakeRestRequest); } - AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.NOT_ACCEPTABLE); + AssertingChannel channel = new AssertingChannel(fakeRestRequest, randomBoolean(), RestStatus.NOT_ACCEPTABLE); restController.registerHandler( new Route(GET, "/foo"), (request, channel1, client) -> channel1.sendResponse( @@ -603,7 +603,7 @@ public void testDispatchUnsupportedContentType() { if (randomBoolean()) { fakeRestRequest = new RestRequest(fakeRestRequest); } - AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.NOT_ACCEPTABLE); + AssertingChannel channel = new AssertingChannel(fakeRestRequest, randomBoolean(), RestStatus.NOT_ACCEPTABLE); assertFalse(channel.getSendResponseCalled()); restController.dispatchRequest(fakeRestRequest, channel, client.threadPool().getThreadContext()); @@ -620,7 +620,7 @@ public void testDispatchWorksWithNewlineDelimitedJson() { if (randomBoolean()) { fakeRestRequest = new RestRequest(fakeRestRequest); } - AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.OK); + AssertingChannel channel = new AssertingChannel(fakeRestRequest, randomBoolean(), RestStatus.OK); restController.registerHandler(new Route(GET, "/foo"), new RestHandler() { @Override public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { @@ -659,7 +659,7 @@ public void testDispatchWithContentStream() { if (randomBoolean()) { fakeRestRequest = new RestRequest(fakeRestRequest); } - AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.OK); + 
AssertingChannel channel = new AssertingChannel(fakeRestRequest, randomBoolean(), RestStatus.OK); restController.registerHandler(new Route(GET, "/foo"), new RestHandler() { @Override public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { @@ -683,7 +683,7 @@ public void testDispatchWithContentStreamNoContentType() { RestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent(new BytesArray("{}"), null) .withPath("/foo") .build(); - AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.NOT_ACCEPTABLE); + AssertingChannel channel = new AssertingChannel(fakeRestRequest, randomBoolean(), RestStatus.NOT_ACCEPTABLE); if (randomBoolean()) { fakeRestRequest = new RestRequest(fakeRestRequest); } @@ -712,7 +712,7 @@ public void testNonStreamingXContentCausesErrorResponse() throws IOException { if (randomBoolean()) { fakeRestRequest = new RestRequest(fakeRestRequest); } - AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.NOT_ACCEPTABLE); + AssertingChannel channel = new AssertingChannel(fakeRestRequest, randomBoolean(), RestStatus.NOT_ACCEPTABLE); restController.registerHandler(new Route(GET, "/foo"), new RestHandler() { @Override public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { @@ -737,7 +737,7 @@ public void testUnknownContentWithContentStream() { if (randomBoolean()) { fakeRestRequest = new RestRequest(fakeRestRequest); } - AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.NOT_ACCEPTABLE); + AssertingChannel channel = new AssertingChannel(fakeRestRequest, randomBoolean(), RestStatus.NOT_ACCEPTABLE); restController.registerHandler(new Route(GET, "/foo"), new RestHandler() { @Override public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { @@ -756,7 +756,7 @@ public boolean supportsBulkContent() { public void testDispatchBadRequest() { final FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build(); - final AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.BAD_REQUEST); + final AssertingChannel channel = new AssertingChannel(fakeRestRequest, randomBoolean(), RestStatus.BAD_REQUEST); restController.dispatchBadRequest( channel, client.threadPool().getThreadContext(), @@ -789,7 +789,7 @@ public boolean canTripCircuitBreaker() { .withContent(BytesReference.bytes(content), content.contentType()) .build(); - final AssertingChannel channel = new AssertingChannel(restRequest, true, RestStatus.OK); + final AssertingChannel channel = new AssertingChannel(restRequest, randomBoolean(), RestStatus.OK); assertFalse(channel.getSendResponseCalled()); assertFalse(restRequest.isContentConsumed()); @@ -801,7 +801,7 @@ public boolean canTripCircuitBreaker() { public void testDispatchBadRequestUnknownCause() { final FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build(); - final AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.BAD_REQUEST); + final AssertingChannel channel = new AssertingChannel(fakeRestRequest, randomBoolean(), RestStatus.BAD_REQUEST); restController.dispatchBadRequest(channel, client.threadPool().getThreadContext(), null); assertTrue(channel.getSendResponseCalled()); assertThat(channel.getRestResponse().content().utf8ToString(), containsString("unknown cause")); @@ 
-813,14 +813,14 @@ public void testDispatchBadRequestWithValidationException() { final FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build(); // it's always a 400 bad request when dispatching "regular" {@code ElasticsearchException} - AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.BAD_REQUEST); + AssertingChannel channel = new AssertingChannel(fakeRestRequest, randomBoolean(), RestStatus.BAD_REQUEST); assertFalse(channel.getSendResponseCalled()); restController.dispatchBadRequest(channel, client.threadPool().getThreadContext(), exception); assertTrue(channel.getSendResponseCalled()); assertThat(channel.getRestResponse().content().utf8ToString(), containsString("bad bad exception")); // but {@code HttpHeadersValidationException} do carry over the rest response code - channel = new AssertingChannel(fakeRestRequest, true, status); + channel = new AssertingChannel(fakeRestRequest, randomBoolean(), status); assertFalse(channel.getSendResponseCalled()); restController.dispatchBadRequest(channel, client.threadPool().getThreadContext(), new HttpHeadersValidationException(exception)); assertTrue(channel.getSendResponseCalled()); @@ -831,7 +831,7 @@ public void testFavicon() { final FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withMethod(GET) .withPath("/favicon.ico") .build(); - final AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.OK); + final AssertingChannel channel = new AssertingChannel(fakeRestRequest, randomBoolean(), RestStatus.OK); restController.dispatchRequest(fakeRestRequest, channel, client.threadPool().getThreadContext()); assertTrue(channel.getSendResponseCalled()); assertThat(channel.getRestResponse().contentType(), containsString("image/x-icon")); @@ -841,7 +841,7 @@ public void testFaviconWithWrongHttpMethod() { final FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withMethod( randomValueOtherThanMany(m -> m == GET || m == OPTIONS, () -> randomFrom(RestRequest.Method.values())) ).withPath("/favicon.ico").build(); - final AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.METHOD_NOT_ALLOWED); + final AssertingChannel channel = new AssertingChannel(fakeRestRequest, randomBoolean(), RestStatus.METHOD_NOT_ALLOWED); restController.dispatchRequest(fakeRestRequest, channel, client.threadPool().getThreadContext()); assertTrue(channel.getSendResponseCalled()); assertThat(channel.getRestResponse().getHeaders().containsKey("Allow"), equalTo(true)); @@ -906,18 +906,13 @@ public HttpResponse createResponse(RestStatus status, ChunkedRestResponseBodyPar @Override public void release() {} - @Override - public HttpRequest releaseAndCopy() { - return this; - } - @Override public Exception getInboundException() { return null; } }, null); - final AssertingChannel channel = new AssertingChannel(request, true, RestStatus.METHOD_NOT_ALLOWED); + final AssertingChannel channel = new AssertingChannel(request, randomBoolean(), RestStatus.METHOD_NOT_ALLOWED); assertFalse(channel.getSendResponseCalled()); restController.dispatchRequest(request, channel, client.threadPool().getThreadContext()); assertTrue(channel.getSendResponseCalled()); @@ -937,7 +932,7 @@ public Method method() { } }; - final AssertingChannel channel = new AssertingChannel(request, true, RestStatus.METHOD_NOT_ALLOWED); + final AssertingChannel channel = new AssertingChannel(request, randomBoolean(), 
RestStatus.METHOD_NOT_ALLOWED); restController.dispatchRequest(request, channel, client.threadPool().getThreadContext()); verify(tracer).startTrace(any(), any(RestRequest.class), anyString(), anyMap()); verify(tracer).addError(any(RestRequest.class), any(IllegalArgumentException.class)); @@ -951,7 +946,7 @@ public void testDispatchCompatibleHandler() { final String mediaType = randomCompatibleMediaType(version); FakeRestRequest fakeRestRequest = requestWithContent(mediaType); - AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.OK); + AssertingChannel channel = new AssertingChannel(fakeRestRequest, randomBoolean(), RestStatus.OK); // dispatch to a compatible handler restController.registerHandler(GET, "/foo", RestApiVersion.minimumSupported(), (request, channel1, client) -> { @@ -975,7 +970,7 @@ public void testDispatchCompatibleRequestToNewlyAddedHandler() { final String mediaType = randomCompatibleMediaType(version); FakeRestRequest fakeRestRequest = requestWithContent(mediaType); - AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.OK); + AssertingChannel channel = new AssertingChannel(fakeRestRequest, randomBoolean(), RestStatus.OK); // dispatch to a CURRENT newly added handler restController.registerHandler(new Route(GET, "/foo"), (request, channel1, client) -> { @@ -1018,7 +1013,7 @@ public void testCurrentVersionVNDMediaTypeIsNotUsingCompatibility() { final String mediaType = randomCompatibleMediaType(version); FakeRestRequest fakeRestRequest = requestWithContent(mediaType); - AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.OK); + AssertingChannel channel = new AssertingChannel(fakeRestRequest, randomBoolean(), RestStatus.OK); // dispatch to a CURRENT newly added handler restController.registerHandler(new Route(GET, "/foo"), (request, channel1, client) -> { @@ -1041,7 +1036,7 @@ public void testCustomMediaTypeValidation() { final String mediaType = "application/x-protobuf"; FakeRestRequest fakeRestRequest = requestWithContent(mediaType); - AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.OK); + AssertingChannel channel = new AssertingChannel(fakeRestRequest, randomBoolean(), RestStatus.OK); // register handler that handles custom media type validation restController.registerHandler(new Route(GET, "/foo"), new RestHandler() { @@ -1068,7 +1063,7 @@ public void testBrowserSafelistedContentTypesAreRejected() { final String mediaType = randomFrom(RestController.SAFELISTED_MEDIA_TYPES); FakeRestRequest fakeRestRequest = requestWithContent(mediaType); - final AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.NOT_ACCEPTABLE); + final AssertingChannel channel = new AssertingChannel(fakeRestRequest, randomBoolean(), RestStatus.NOT_ACCEPTABLE); restController.registerHandler(new Route(GET, "/foo"), new RestHandler() { @Override @@ -1115,7 +1110,7 @@ public void testApiProtectionWithServerlessDisabled() { List<String> accessiblePaths = List.of("/public", "/internal", "/hidden"); accessiblePaths.forEach(path -> { RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath(path).build(); - AssertingChannel channel = new AssertingChannel(request, true, RestStatus.OK); + AssertingChannel channel = new AssertingChannel(request, randomBoolean(), RestStatus.OK); restController.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY)); }); } @@ -1137,12 +1132,12 @@ public void 
testApiProtectionWithServerlessEnabledAsEndUser() { final Consumer<List<String>> checkUnprotected = paths -> paths.forEach(path -> { RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath(path).build(); - AssertingChannel channel = new AssertingChannel(request, true, RestStatus.OK); + AssertingChannel channel = new AssertingChannel(request, randomBoolean(), RestStatus.OK); restController.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY)); }); final Consumer<List<String>> checkProtected = paths -> paths.forEach(path -> { RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath(path).build(); - AssertingChannel channel = new AssertingChannel(request, true, RestStatus.GONE); + AssertingChannel channel = new AssertingChannel(request, randomBoolean(), RestStatus.GONE); restController.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY)); RestResponse restResponse = channel.getRestResponse(); diff --git a/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java b/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java index 4345f3c5e3fb4..7fe2388ec5113 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java @@ -97,7 +97,7 @@ public void testUnsupportedMethodResponseHttpHeader() throws Exception { RestRequest restRequest = fakeRestRequestBuilder.build(); // Send the request and verify the response status code - FakeRestChannel restChannel = new FakeRestChannel(restRequest, true, 1); + FakeRestChannel restChannel = new FakeRestChannel(restRequest, randomBoolean(), 1); restController.dispatchRequest(restRequest, restChannel, new ThreadContext(Settings.EMPTY)); assertThat(restChannel.capturedResponse().status().getStatus(), is(405)); diff --git a/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java b/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java index 8a0ca5ba6c8a5..b391b77503400 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java @@ -12,7 +12,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.http.HttpBody; import org.elasticsearch.http.HttpChannel; @@ -321,7 +321,7 @@ public String uri() { } @Override - public BytesReference content() { + public ReleasableBytesReference content() { return restRequest.content(); } } diff --git a/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java b/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java index cfed83f352951..b85ad31288c8c 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java @@ -93,7 +93,6 @@ public void testWithHeaders() throws Exception { assertThat(response.getHeaders().get("n1"), contains("v11", "v12")); assertThat(response.getHeaders().get("n2"), notNullValue()); assertThat(response.getHeaders().get("n2"), contains("v21", "v22")); - assertChannelWarnings(channel); } public void testEmptyChunkedBody() { @@ -114,11 +113,11 @@ public void testSimpleExceptionMessage() throws Exception { 
Exception t = new ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar")); RestResponse response = new RestResponse(channel, t); String text = response.content().utf8ToString(); - assertThat(text, containsString("ElasticsearchException[an error occurred reading data]")); - assertThat(text, not(containsString("FileNotFoundException"))); + assertThat(text, containsString(""" + {"type":"exception","reason":"an error occurred reading data"}""")); + assertThat(text, not(containsString("file_not_found_exception"))); assertThat(text, not(containsString("/foo/bar"))); assertThat(text, not(containsString("error_trace"))); - assertChannelWarnings(channel); } public void testDetailedExceptionMessage() throws Exception { @@ -134,20 +133,6 @@ public void testDetailedExceptionMessage() throws Exception { {"type":"file_not_found_exception","reason":"/foo/bar"}""")); } - public void testNonElasticsearchExceptionIsNotShownAsSimpleMessage() throws Exception { - RestRequest request = new FakeRestRequest(); - RestChannel channel = new SimpleExceptionRestChannel(request); - - Exception t = new UnknownException("an error occurred reading data", new FileNotFoundException("/foo/bar")); - RestResponse response = new RestResponse(channel, t); - String text = response.content().utf8ToString(); - assertThat(text, not(containsString("UnknownException[an error occurred reading data]"))); - assertThat(text, not(containsString("FileNotFoundException[/foo/bar]"))); - assertThat(text, not(containsString("error_trace"))); - assertThat(text, containsString("\"error\":\"No ElasticsearchException found\"")); - assertChannelWarnings(channel); - } - public void testErrorTrace() throws Exception { RestRequest request = new FakeRestRequest(); request.params().put("error_trace", "true"); @@ -177,7 +162,6 @@ public void testAuthenticationFailedNoStackTrace() throws IOException { RestResponse response = new RestResponse(channel, authnException); assertThat(response.status(), is(RestStatus.UNAUTHORIZED)); assertThat(response.content().utf8ToString(), not(containsString(ElasticsearchException.STACK_TRACE))); - assertChannelWarnings(channel); } } } @@ -202,7 +186,6 @@ public void testStackTrace() throws IOException { } else { assertThat(response.content().utf8ToString(), not(containsString(ElasticsearchException.STACK_TRACE))); } - assertChannelWarnings(channel); } } } @@ -232,9 +215,9 @@ public void testNullThrowable() throws Exception { RestResponse response = new RestResponse(channel, null); String text = response.content().utf8ToString(); - assertThat(text, containsString("\"error\":\"unknown\"")); + assertThat(text, containsString("\"type\":\"unknown\"")); + assertThat(text, containsString("\"reason\":\"unknown\"")); assertThat(text, not(containsString("error_trace"))); - assertChannelWarnings(channel); } public void testConvert() throws IOException { @@ -324,32 +307,26 @@ public void testErrorToAndFromXContent() throws IOException { original = new ElasticsearchException("ElasticsearchException without cause"); if (detailed) { addHeadersOrMetadata = randomBoolean(); - reason = "ElasticsearchException without cause"; - } else { - reason = "ElasticsearchException[ElasticsearchException without cause]"; } + reason = "ElasticsearchException without cause"; } case 1 -> { original = new ElasticsearchException("ElasticsearchException with a cause", new FileNotFoundException("missing")); if (detailed) { addHeadersOrMetadata = randomBoolean(); - type = "exception"; - reason = "ElasticsearchException 
with a cause"; cause = new ElasticsearchException("Elasticsearch exception [type=file_not_found_exception, reason=missing]"); - } else { - reason = "ElasticsearchException[ElasticsearchException with a cause]"; } + type = "exception"; + reason = "ElasticsearchException with a cause"; } case 2 -> { original = new ResourceNotFoundException("ElasticsearchException with custom status"); status = RestStatus.NOT_FOUND; if (detailed) { addHeadersOrMetadata = randomBoolean(); - type = "resource_not_found_exception"; - reason = "ElasticsearchException with custom status"; - } else { - reason = "ResourceNotFoundException[ElasticsearchException with custom status]"; } + type = "resource_not_found_exception"; + reason = "ElasticsearchException with custom status"; } case 3 -> { TransportAddress address = buildNewFakeTransportAddress(); @@ -360,12 +337,8 @@ public void testErrorToAndFromXContent() throws IOException { new ResourceAlreadyExistsException("ElasticsearchWrapperException with a cause that has a custom status") ); status = RestStatus.BAD_REQUEST; - if (detailed) { - type = "resource_already_exists_exception"; - reason = "ElasticsearchWrapperException with a cause that has a custom status"; - } else { - reason = "RemoteTransportException[[remote][" + address.toString() + "][action]]"; - } + type = "resource_already_exists_exception"; + reason = "ElasticsearchWrapperException with a cause that has a custom status"; } case 4 -> { original = new RemoteTransportException( @@ -373,23 +346,17 @@ public void testErrorToAndFromXContent() throws IOException { new IllegalArgumentException("wrong") ); status = RestStatus.BAD_REQUEST; - if (detailed) { - type = "illegal_argument_exception"; - reason = "wrong"; - } else { - reason = "RemoteTransportException[[ElasticsearchWrapperException with a cause that has a special treatment]]"; - } + type = "illegal_argument_exception"; + reason = "wrong"; } case 5 -> { status = randomFrom(RestStatus.values()); original = new ElasticsearchStatusException("ElasticsearchStatusException with random status", status); if (detailed) { addHeadersOrMetadata = randomBoolean(); - type = "status_exception"; - reason = "ElasticsearchStatusException with random status"; - } else { - reason = "ElasticsearchStatusException[ElasticsearchStatusException with random status]"; } + type = "status_exception"; + reason = "ElasticsearchStatusException with random status"; } default -> throw new UnsupportedOperationException("Failed to generate random exception"); } @@ -435,7 +402,6 @@ public void testErrorToAndFromXContent() throws IOException { assertEquals(expected.status(), parsedError.status()); assertDeepEquals(expected, parsedError); - assertChannelWarnings(channel); } public void testNoErrorFromXContent() throws IOException { @@ -502,7 +468,9 @@ public void testResponseContentTypeUponException() throws Exception { Exception t = new ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar")); RestResponse response = new RestResponse(channel, t); assertThat(response.contentType(), equalTo(mediaType)); - assertChannelWarnings(channel); + assertWarnings( + "The JSON format of non-detailed errors has changed in Elasticsearch 9.0 to match the JSON structure used for detailed errors." 
+ ); } public void testSupressedLogging() throws IOException { @@ -534,7 +502,6 @@ public void testSupressedLogging() throws IOException { "401", "unauthorized" ); - assertChannelWarnings(channel); } private void assertLogging( @@ -560,15 +527,6 @@ private void assertLogging( } } - private void assertChannelWarnings(RestChannel channel) { - if (channel.detailedErrorsEnabled() == false) { - assertWarnings( - "The JSON format of non-detailed errors will change in Elasticsearch 9.0" - + " to match the JSON structure used for detailed errors. To keep using the existing format, use the V8 REST API." - ); - } - } - public static class WithHeadersException extends ElasticsearchException { WithHeadersException() { diff --git a/server/src/test/java/org/elasticsearch/rest/action/RestBuilderListenerTests.java b/server/src/test/java/org/elasticsearch/rest/action/RestBuilderListenerTests.java index 03ae366050646..827a07b89b2b8 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/RestBuilderListenerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/RestBuilderListenerTests.java @@ -26,7 +26,7 @@ public class RestBuilderListenerTests extends ESTestCase { public void testXContentBuilderClosedInBuildResponse() throws Exception { AtomicReference builderAtomicReference = new AtomicReference<>(); RestBuilderListener builderListener = new RestBuilderListener( - new FakeRestChannel(new FakeRestRequest(), true, 1) + new FakeRestChannel(new FakeRestRequest(), randomBoolean(), 1) ) { @Override public RestResponse buildResponse(Empty empty, XContentBuilder builder) throws Exception { @@ -44,7 +44,7 @@ public RestResponse buildResponse(Empty empty, XContentBuilder builder) throws E public void testXContentBuilderNotClosedInBuildResponseAssertionsDisabled() throws Exception { AtomicReference builderAtomicReference = new AtomicReference<>(); RestBuilderListener builderListener = new RestBuilderListener( - new FakeRestChannel(new FakeRestRequest(), true, 1) + new FakeRestChannel(new FakeRestRequest(), randomBoolean(), 1) ) { @Override public RestResponse buildResponse(Empty empty, XContentBuilder builder) throws Exception { @@ -68,7 +68,7 @@ public void testXContentBuilderNotClosedInBuildResponseAssertionsEnabled() throw assumeTrue("tests are not being run with assertions", RestBuilderListener.class.desiredAssertionStatus()); RestBuilderListener builderListener = new RestBuilderListener( - new FakeRestChannel(new FakeRestRequest(), true, 1) + new FakeRestChannel(new FakeRestRequest(), randomBoolean(), 1) ) { @Override public RestResponse buildResponse(Empty empty, XContentBuilder builder) throws Exception { diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java index 8104ecfc31c3d..dad6885a08fa8 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java @@ -34,7 +34,7 @@ public void testConsumesParameters() throws Exception { FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams( Map.of("parent_task_id", "the node:3", "nodes", "node1,node2", "actions", "*") ).build(); - FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, true, 1); + FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), 1); try (var threadPool = createThreadPool()) { final var nodeClient 
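A quick illustrative sketch, not part of this patch: the RestResponseTests changes above reflect the 9.0 behavior where non-detailed errors share the JSON structure of detailed errors (a "type"/"reason" object instead of one flattened string). The nested "error" envelope below is assumed from the assertions; the Map-based body is illustrative only and is not Elasticsearch code.

    import java.util.Map;

    // Sketch: with the unified 9.0 format a client can read "type" and "reason"
    // the same way whether or not detailed errors are enabled.
    final class ErrorEnvelopeSketch {
        @SuppressWarnings("unchecked")
        static String summarize(Map<String, Object> responseBody) {
            // Assumed envelope: {"error":{"type":...,"reason":...},"status":...}
            Map<String, Object> error = (Map<String, Object>) responseBody.get("error");
            return error.get("type") + ": " + error.get("reason");
        }

        public static void main(String[] args) {
            Map<String, Object> body = Map.of("error", Map.of("type", "exception", "reason", "an error occurred reading data"));
            System.out.println(summarize(body)); // exception: an error occurred reading data
        }
    }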
diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java
index 8104ecfc31c3d..dad6885a08fa8 100644
--- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java
+++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java
@@ -34,7 +34,7 @@ public void testConsumesParameters() throws Exception {
         FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams(
             Map.of("parent_task_id", "the node:3", "nodes", "node1,node2", "actions", "*")
         ).build();
-        FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, true, 1);
+        FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), 1);
         try (var threadPool = createThreadPool()) {
             final var nodeClient = buildNodeClient(threadPool);
             action.handleRequest(fakeRestRequest, fakeRestChannel, nodeClient);
diff --git a/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java
index 0d35e4311032d..f83ba1704f954 100644
--- a/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java
+++ b/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java
@@ -222,7 +222,7 @@ public void next() {
             })
             .withHeaders(Map.of("Content-Type", Collections.singletonList("application/json")))
             .build();
-        FakeRestChannel channel = new FakeRestChannel(request, true, 1);
+        FakeRestChannel channel = new FakeRestChannel(request, randomBoolean(), 1);
 
         RestBulkAction.ChunkHandler chunkHandler = new RestBulkAction.ChunkHandler(
             true,
diff --git a/server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java
index aaa706acf525a..580dad8128494 100644
--- a/server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java
+++ b/server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java
@@ -51,7 +51,7 @@ public void testEnableFieldsEmulationNoErrors() throws Exception {
             .withParams(params)
             .build();
 
-        action.handleRequest(request, new FakeRestChannel(request, true, 1), verifyingClient);
+        action.handleRequest(request, new FakeRestChannel(request, randomBoolean(), 1), verifyingClient);
     }
 
     public void testValidateSearchRequest() {
diff --git a/server/src/test/java/org/elasticsearch/script/MultiVectorScoreScriptUtilsTests.java b/server/src/test/java/org/elasticsearch/script/MultiVectorScoreScriptUtilsTests.java
new file mode 100644
index 0000000000000..f908f51170478
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/script/MultiVectorScoreScriptUtilsTests.java
@@ -0,0 +1,342 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.script;
+
+import org.apache.lucene.util.VectorUtil;
+import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.ElementType;
+import org.elasticsearch.index.mapper.vectors.MultiDenseVectorFieldMapper;
+import org.elasticsearch.index.mapper.vectors.MultiDenseVectorScriptDocValuesTests;
+import org.elasticsearch.script.MultiVectorScoreScriptUtils.MaxSimDotProduct;
+import org.elasticsearch.script.MultiVectorScoreScriptUtils.MaxSimInvHamming;
+import org.elasticsearch.script.field.vectors.BitMultiDenseVectorDocValuesField;
+import org.elasticsearch.script.field.vectors.ByteMultiDenseVectorDocValuesField;
+import org.elasticsearch.script.field.vectors.FloatMultiDenseVectorDocValuesField;
+import org.elasticsearch.script.field.vectors.MultiDenseVectorDocValuesField;
+import org.elasticsearch.test.ESTestCase;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HexFormat;
+import java.util.List;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class MultiVectorScoreScriptUtilsTests extends ESTestCase {
+
+    @BeforeClass
+    public static void setup() {
+        assumeTrue("Requires multi-dense vector support", MultiDenseVectorFieldMapper.FEATURE_FLAG.isEnabled());
+    }
+
+    public void testFloatMultiVectorClassBindings() throws IOException {
+        String fieldName = "vector";
+        int dims = 5;
+        float[][][] docVectors = new float[][][] {
+            { { 230.0f, 300.33f, -34.8988f, 15.555f, -200.0f }, { 100.0f, 200.0f, -50.0f, 10.0f, -150.0f } } };
+        float[][] docMagnitudes = new float[][] { { 0.0f, 0.0f } };
+        for (int i = 0; i < docVectors.length; i++) {
+            for (int j = 0; j < docVectors[i].length; j++) {
+                docMagnitudes[i][j] = (float) Math.sqrt(VectorUtil.dotProduct(docVectors[i][j], docVectors[i][j]));
+            }
+        }
+
+        List<List<Number>> queryVector = List.of(Arrays.asList(0.5f, 111.3f, -13.0f, 14.8f, -156.0f));
+        List<List<Number>> invalidQueryVector = List.of(Arrays.asList(0.5, 111.3));
+
+        List<MultiDenseVectorDocValuesField> fields = List.of(
+            new FloatMultiDenseVectorDocValuesField(
+                MultiDenseVectorScriptDocValuesTests.wrap(docVectors, ElementType.FLOAT),
+                MultiDenseVectorScriptDocValuesTests.wrap(docMagnitudes),
+                "test",
+                ElementType.FLOAT,
+                dims
+            ),
+            new FloatMultiDenseVectorDocValuesField(
+                MultiDenseVectorScriptDocValuesTests.wrap(docVectors, ElementType.FLOAT),
+                MultiDenseVectorScriptDocValuesTests.wrap(docMagnitudes),
+                "test",
+                ElementType.FLOAT,
+                dims
+            )
+        );
+        for (MultiDenseVectorDocValuesField field : fields) {
+            field.setNextDocId(0);
+
+            ScoreScript scoreScript = mock(ScoreScript.class);
+            when(scoreScript.field("vector")).thenAnswer(mock -> field);
+
+            // Test max similarity dot product
+            MaxSimDotProduct maxSimDotProduct = new MaxSimDotProduct(scoreScript, queryVector, fieldName);
+            float maxSimDotProductExpected = 65425.625f; // Adjust this value based on expected max similarity
+            assertEquals(
+                "maxSimDotProduct result is not equal to the expected value!",
+                maxSimDotProductExpected,
+                maxSimDotProduct.maxSimDotProduct(),
+                0.001
+            );
+
+            // Check each function rejects query vectors with the wrong dimension
+            IllegalArgumentException e = expectThrows(
+                IllegalArgumentException.class,
+                () -> new MultiVectorScoreScriptUtils.MaxSimDotProduct(scoreScript, invalidQueryVector, fieldName)
+            );
+            assertThat(
+                e.getMessage(),
+                containsString("query vector has a different number of dimensions [2] than the document vectors [5]")
+            );
+            e = expectThrows(IllegalArgumentException.class, () -> new MaxSimInvHamming(scoreScript, invalidQueryVector, fieldName));
+            assertThat(e.getMessage(), containsString("hamming distance is only supported for byte or bit vectors"));
+
+            // Check scripting infrastructure integration
+            assertEquals(65425.6249, new MaxSimDotProduct(scoreScript, queryVector, fieldName).maxSimDotProduct(), 0.001);
+            when(scoreScript._getDocId()).thenReturn(1);
+            e = expectThrows(
+                IllegalArgumentException.class,
+                () -> new MaxSimDotProduct(scoreScript, queryVector, fieldName).maxSimDotProduct()
+            );
+            assertEquals("A document doesn't have a value for a multi-vector field!", e.getMessage());
+        }
+    }
+
+    public void testByteMultiVectorClassBindings() throws IOException {
+        String fieldName = "vector";
+        int dims = 5;
+        float[][] docVector = new float[][] { { 1, 127, -128, 5, -10 } };
+        float[][] magnitudes = new float[][] { { 0 } };
+        for (int i = 0; i < docVector.length; i++) {
+            magnitudes[i][0] = (float) Math.sqrt(VectorUtil.dotProduct(docVector[i], docVector[i]));
+        }
+        List<List<Number>> queryVector = List.of(Arrays.asList((byte) 1, (byte) 125, (byte) -12, (byte) 2, (byte) 4));
+        List<List<Number>> invalidQueryVector = List.of(Arrays.asList((byte) 1, (byte) 1));
+        List<String> hexidecimalString = List.of(HexFormat.of().formatHex(new byte[] { 1, 125, -12, 2, 4 }));
+
+        List<MultiDenseVectorDocValuesField> fields = List.of(
+            new ByteMultiDenseVectorDocValuesField(
+                MultiDenseVectorScriptDocValuesTests.wrap(new float[][][] { docVector }, ElementType.BYTE),
+                MultiDenseVectorScriptDocValuesTests.wrap(magnitudes),
+                "test",
+                ElementType.BYTE,
+                dims
+            )
+        );
+        for (MultiDenseVectorDocValuesField field : fields) {
+            field.setNextDocId(0);
+
+            ScoreScript scoreScript = mock(ScoreScript.class);
+            when(scoreScript.field(fieldName)).thenAnswer(mock -> field);
+
+            // Check each function rejects query vectors with the wrong dimension
+            IllegalArgumentException e = expectThrows(
+                IllegalArgumentException.class,
+                () -> new MaxSimDotProduct(scoreScript, invalidQueryVector, fieldName)
+            );
+            assertThat(
+                e.getMessage(),
+                containsString("query vector has a different number of dimensions [2] than the document vectors [5]")
+            );
+            e = expectThrows(IllegalArgumentException.class, () -> new MaxSimInvHamming(scoreScript, invalidQueryVector, fieldName));
+            assertThat(
+                e.getMessage(),
+                containsString("query vector has a different number of dimensions [2] than the document vectors [5]")
+            );
+
+            // Check scripting infrastructure integration
+            assertEquals(17382.0, new MaxSimDotProduct(scoreScript, queryVector, fieldName).maxSimDotProduct(), 0.001);
+            assertEquals(17382.0, new MaxSimDotProduct(scoreScript, hexidecimalString, fieldName).maxSimDotProduct(), 0.001);
+            assertEquals(0.675, new MaxSimInvHamming(scoreScript, queryVector, fieldName).maxSimInvHamming(), 0.001);
+            assertEquals(0.675, new MaxSimInvHamming(scoreScript, hexidecimalString, fieldName).maxSimInvHamming(), 0.001);
+            MaxSimDotProduct maxSimDotProduct = new MaxSimDotProduct(scoreScript, queryVector, fieldName);
+            when(scoreScript._getDocId()).thenReturn(1);
+            e = expectThrows(IllegalArgumentException.class, maxSimDotProduct::maxSimDotProduct);
+            assertEquals("A document doesn't have a value for a multi-vector field!", e.getMessage());
+        }
+    }
+
+    public void testBitMultiVectorClassBindingsDotProduct() throws IOException {
+        String fieldName = "vector";
+        int dims = 8;
+        float[][] docVector = new float[][] { { 124 } };
+        // 124 in binary is b01111100
+        List<List<Number>> queryVector = List.of(
+            Arrays.asList((byte) 1, (byte) 125, (byte) -12, (byte) 2, (byte) 4, (byte) 1, (byte) 125, (byte) -12)
+        );
+        List<List<Number>> floatQueryVector = List.of(Arrays.asList(1.4f, -1.4f, 0.42f, 0.0f, 1f, -1f, -0.42f, 1.2f));
+        List<List<Number>> invalidQueryVector = List.of(Arrays.asList((byte) 1, (byte) 1));
+        List<String> hexidecimalString = List.of(HexFormat.of().formatHex(new byte[] { 124 }));
+
+        List<MultiDenseVectorDocValuesField> fields = List.of(
+            new BitMultiDenseVectorDocValuesField(
+                MultiDenseVectorScriptDocValuesTests.wrap(new float[][][] { docVector }, ElementType.BIT),
+                MultiDenseVectorScriptDocValuesTests.wrap(new float[][] { { 5 } }),
+                "test",
+                ElementType.BIT,
+                dims
+            )
+        );
+        for (MultiDenseVectorDocValuesField field : fields) {
+            field.setNextDocId(0);
+
+            ScoreScript scoreScript = mock(ScoreScript.class);
+            when(scoreScript.field(fieldName)).thenAnswer(mock -> field);
+
+            MaxSimDotProduct function = new MaxSimDotProduct(scoreScript, queryVector, fieldName);
+            assertEquals(
+                "maxSimDotProduct result is not equal to the expected value!",
+                -12 + 2 + 4 + 1 + 125,
+                function.maxSimDotProduct(),
+                0.001
+            );
+
+            function = new MaxSimDotProduct(scoreScript, floatQueryVector, fieldName);
+            assertEquals(
+                "maxSimDotProduct result is not equal to the expected value!",
+                -1.4f + 0.42f + 0f + 1f - 1f,
+                function.maxSimDotProduct(),
+                0.001
+            );
+
+            function = new MaxSimDotProduct(scoreScript, hexidecimalString, fieldName);
+            assertEquals(
+                "maxSimDotProduct result is not equal to the expected value!",
+                Integer.bitCount(124),
+                function.maxSimDotProduct(),
+                0.0
+            );
+
+            // Check each function rejects query vectors with the wrong dimension
+            IllegalArgumentException e = expectThrows(
+                IllegalArgumentException.class,
+                () -> new MaxSimDotProduct(scoreScript, invalidQueryVector, fieldName)
+            );
+            assertThat(
+                e.getMessage(),
+                containsString(
+                    "query vector contains inner vectors which have incorrect number of dimensions. "
+                        + "Must be [1] for bitwise operations, or [8] for byte wise operations: provided [2]."
+                )
+            );
+        }
+    }
+
+    public void testByteVsFloatSimilarity() throws IOException {
+        int dims = 5;
+        float[][] docVector = new float[][] { { 1f, 127f, -128f, 5f, -10f } };
+        float[][] magnitudes = new float[][] { { 0 } };
+        for (int i = 0; i < docVector.length; i++) {
+            magnitudes[i][0] = (float) Math.sqrt(VectorUtil.dotProduct(docVector[i], docVector[i]));
+        }
+        List<List<Number>> listFloatVector = List.of(Arrays.asList(1f, 125f, -12f, 2f, 4f));
+        List<List<Number>> listByteVector = List.of(Arrays.asList((byte) 1, (byte) 125, (byte) -12, (byte) 2, (byte) 4));
+        float[][] floatVector = new float[][] { { 1f, 125f, -12f, 2f, 4f } };
+        byte[][] byteVector = new byte[][] { { (byte) 1, (byte) 125, (byte) -12, (byte) 2, (byte) 4 } };
+
+        List<MultiDenseVectorDocValuesField> fields = List.of(
+            new FloatMultiDenseVectorDocValuesField(
+                MultiDenseVectorScriptDocValuesTests.wrap(new float[][][] { docVector }, ElementType.FLOAT),
+                MultiDenseVectorScriptDocValuesTests.wrap(magnitudes),
+                "field1",
+                ElementType.FLOAT,
+                dims
+            ),
+            new ByteMultiDenseVectorDocValuesField(
+                MultiDenseVectorScriptDocValuesTests.wrap(new float[][][] { docVector }, ElementType.BYTE),
+                MultiDenseVectorScriptDocValuesTests.wrap(magnitudes),
+                "field3",
+                ElementType.BYTE,
+                dims
+            )
+        );
+        for (MultiDenseVectorDocValuesField field : fields) {
+            field.setNextDocId(0);
+
+            ScoreScript scoreScript = mock(ScoreScript.class);
+            when(scoreScript.field("vector")).thenAnswer(mock -> field);
+
+            int dotProductExpected = 17382;
+            MaxSimDotProduct maxSimDotProduct = new MaxSimDotProduct(scoreScript, listFloatVector, "vector");
+            assertEquals(field.getName(), dotProductExpected, maxSimDotProduct.maxSimDotProduct(), 0.001);
+            maxSimDotProduct = new MaxSimDotProduct(scoreScript, listByteVector, "vector");
+            assertEquals(field.getName(), dotProductExpected, maxSimDotProduct.maxSimDotProduct(), 0.001);
+            switch (field.getElementType()) {
+                case BYTE -> {
+                    assertEquals(field.getName(), dotProductExpected, field.get().maxSimDotProduct(byteVector), 0.001);
+                    UnsupportedOperationException e = expectThrows(
+                        UnsupportedOperationException.class,
+                        () -> field.get().maxSimDotProduct(floatVector)
+                    );
+                    assertThat(e.getMessage(), containsString("use [float maxSimDotProduct(byte[][] queryVector)] instead"));
+                }
+                case FLOAT -> {
+                    assertEquals(field.getName(), dotProductExpected, field.get().maxSimDotProduct(floatVector), 0.001);
+                    UnsupportedOperationException e = expectThrows(
+                        UnsupportedOperationException.class,
+                        () -> field.get().maxSimDotProduct(byteVector)
+                    );
+                    assertThat(e.getMessage(), containsString("use [float maxSimDotProduct(float[][] queryVector)] instead"));
+                }
+            }
+        }
+    }
+
+    public void testByteBoundaries() throws IOException {
+        String fieldName = "vector";
+        int dims = 1;
+        float[] docVector = new float[] { 0 };
+        List<List<Number>> greaterThanVector = List.of(List.of(128));
+        List<List<Number>> lessThanVector = List.of(List.of(-129));
+        List<List<Number>> decimalVector = List.of(List.of(0.5));
+
+        List<MultiDenseVectorDocValuesField> fields = List.of(
+            new ByteMultiDenseVectorDocValuesField(
+                MultiDenseVectorScriptDocValuesTests.wrap(new float[][][] { { docVector } }, ElementType.BYTE),
+                MultiDenseVectorScriptDocValuesTests.wrap(new float[][] { { 1 } }),
+                "test",
+                ElementType.BYTE,
+                dims
+            )
+        );
+
+        for (MultiDenseVectorDocValuesField field : fields) {
+            field.setNextDocId(0);
+
+            ScoreScript scoreScript = mock(ScoreScript.class);
+            when(scoreScript.field(fieldName)).thenAnswer(mock -> field);
+
+            IllegalArgumentException e;
+
+            e = expectThrows(IllegalArgumentException.class, () -> new MaxSimDotProduct(scoreScript, greaterThanVector, fieldName));
+            assertEquals(
+                "element_type [byte] vectors only support integers between [-128, 127] but found [128.0] at dim [0]; "
+                    + "Preview of invalid vector: [128.0]",
+                e.getMessage()
+            );
+
+            e = expectThrows(IllegalArgumentException.class, () -> new MaxSimDotProduct(scoreScript, lessThanVector, fieldName));
+            assertEquals(
+                e.getMessage(),
+                "element_type [byte] vectors only support integers between [-128, 127] but found [-129.0] at dim [0]; "
+                    + "Preview of invalid vector: [-129.0]"
+            );
+            e = expectThrows(IllegalArgumentException.class, () -> new MaxSimDotProduct(scoreScript, decimalVector, fieldName));
+            assertEquals(
+                e.getMessage(),
+                "element_type [byte] vectors only support non-decimal values but found decimal value [0.5] at dim [0]; "
+                    + "Preview of invalid vector: [0.5]"
+            );
+        }
+    }
+
+    public void testDimMismatch() throws IOException {
+
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java b/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java
index 6b2178310d17c..dcaa64ede9e89 100644
--- a/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java
+++ b/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java
@@ -267,7 +267,7 @@ public void testBitVectorClassBindingsDotProduct() throws IOException {
         function = new DotProduct(scoreScript, floatQueryVector, fieldName);
         assertEquals(
             "dotProduct result is not equal to the expected value!",
-            0.42f + 0f + 1f - 1f - 0.42f,
+            -1.4f + 0.42f + 0f + 1f - 1f,
             function.dotProduct(),
             0.001
         );
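A hedged, self-contained sketch, not part of this patch: the expected values asserted in MultiVectorScoreScriptUtilsTests above are consistent with the usual "max-sim" late-interaction scoring, i.e. for each query vector take the maximum dot product over the document's vectors, then sum across query vectors. Plain Java for illustration; this is not the Elasticsearch implementation.

    // Sketch of maxSimDotProduct as asserted above: with the single five-dimensional
    // query vector and the two document rows, max(dot(q, row0), dot(q, row1)) = 65425.625.
    final class MaxSimSketch {
        static float maxSimDotProduct(float[][] queryVectors, float[][] docVectors) {
            float score = 0f;
            for (float[] q : queryVectors) {
                float best = Float.NEGATIVE_INFINITY;
                for (float[] d : docVectors) {
                    float dot = 0f;
                    for (int i = 0; i < q.length; i++) {
                        dot += q[i] * d[i];
                    }
                    best = Math.max(best, dot);
                }
                score += best; // late interaction: sum over query vectors of the best match
            }
            return score;
        }

        public static void main(String[] args) {
            float[][] query = { { 0.5f, 111.3f, -13.0f, 14.8f, -156.0f } };
            float[][] doc = { { 230.0f, 300.33f, -34.8988f, 15.555f, -200.0f }, { 100.0f, 200.0f, -50.0f, 10.0f, -150.0f } };
            System.out.println(maxSimDotProduct(query, doc)); // ~65425.625
        }
    }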
diff --git a/server/src/test/java/org/elasticsearch/script/field/vectors/MultiDenseVectorTests.java b/server/src/test/java/org/elasticsearch/script/field/vectors/MultiDenseVectorTests.java
new file mode 100644
index 0000000000000..12f4b931b4d0a
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/script/field/vectors/MultiDenseVectorTests.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.script.field.vectors;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.VectorUtil;
+import org.elasticsearch.index.mapper.vectors.MultiDenseVectorFieldMapper;
+import org.elasticsearch.test.ESTestCase;
+import org.junit.BeforeClass;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.function.IntFunction;
+
+public class MultiDenseVectorTests extends ESTestCase {
+
+    @BeforeClass
+    public static void setup() {
+        assumeTrue("Requires multi-dense vector support", MultiDenseVectorFieldMapper.FEATURE_FLAG.isEnabled());
+    }
+
+    public void testByteUnsupported() {
+        int count = randomIntBetween(1, 16);
+        int dims = randomIntBetween(1, 16);
+        byte[][] docVector = new byte[count][dims];
+        float[][] queryVector = new float[count][dims];
+        for (int i = 0; i < docVector.length; i++) {
+            random().nextBytes(docVector[i]);
+            for (int j = 0; j < dims; j++) {
+                queryVector[i][j] = randomFloat();
+            }
+        }
+
+        MultiDenseVector knn = newByteVector(docVector);
+        UnsupportedOperationException e;
+
+        e = expectThrows(UnsupportedOperationException.class, () -> knn.maxSimDotProduct(queryVector));
+        assertEquals(e.getMessage(), "use [float maxSimDotProduct(byte[][] queryVector)] instead");
+    }
+
+    public void testFloatUnsupported() {
+        int count = randomIntBetween(1, 16);
+        int dims = randomIntBetween(1, 16);
+        float[][] docVector = new float[count][dims];
+        byte[][] queryVector = new byte[count][dims];
+        for (int i = 0; i < docVector.length; i++) {
+            random().nextBytes(queryVector[i]);
+            for (int j = 0; j < dims; j++) {
+                docVector[i][j] = randomFloat();
+            }
+        }
+
+        MultiDenseVector knn = newFloatVector(docVector);
+
+        UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, () -> knn.maxSimDotProduct(queryVector));
+        assertEquals(e.getMessage(), "use [float maxSimDotProduct(float[][] queryVector)] instead");
+    }
+
+    static MultiDenseVector newFloatVector(float[][] vector) {
+        BytesRef magnitudes = magnitudes(vector.length, i -> (float) Math.sqrt(VectorUtil.dotProduct(vector[i], vector[i])));
+        return new FloatMultiDenseVector(VectorIterator.from(vector), magnitudes, vector.length, vector[0].length);
+    }
+
+    static MultiDenseVector newByteVector(byte[][] vector) {
+        BytesRef magnitudes = magnitudes(vector.length, i -> (float) Math.sqrt(VectorUtil.dotProduct(vector[i], vector[i])));
+        return new ByteMultiDenseVector(VectorIterator.from(vector), magnitudes, vector.length, vector[0].length);
+    }
+
+    static BytesRef magnitudes(int count, IntFunction<Float> magnitude) {
+        ByteBuffer magnitudeBuffer = ByteBuffer.allocate(count * Float.BYTES).order(ByteOrder.LITTLE_ENDIAN);
+        for (int i = 0; i < count; i++) {
+            magnitudeBuffer.putFloat(magnitude.apply(i));
+        }
+        return new BytesRef(magnitudeBuffer.array());
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AdaptingAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AdaptingAggregatorTests.java
index 125b2d20cf9f3..6e9bb596e944b 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/AdaptingAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/AdaptingAggregatorTests.java
@@ -9,6 +9,7 @@
 
 package org.elasticsearch.search.aggregations;
 
+import org.elasticsearch.common.util.LongArray;
 import org.elasticsearch.core.CheckedFunction;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.MapperServiceTestCase;
@@ -113,7 +114,7 @@ protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCt
         }
 
         @Override
-        public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+        public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) {
             return new InternalAggregation[] { null };
         }
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorBaseTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorBaseTests.java
index 8d3fe0f7f6e79..2d0622dbb6322 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorBaseTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorBaseTests.java
@@ -15,6 +15,7 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.util.LongArray;
 import org.elasticsearch.index.mapper.DateFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperService;
@@ -47,7 +48,7 @@ protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCt
         }
 
         @Override
-        public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+        public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) {
             throw new UnsupportedOperationException();
         }
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java
index bd423999722f3..c9185fe35e677 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java
@@ -137,17 +135,15 @@ protected AggregatorReducer getLeaderReducer(AggregationReduceContext reduceCont
                     new InternalFiltersForF2(
                         "f2",
                         List.of(
-                            new InternalFilters.InternalBucket("f2k1", k1k1, InternalAggregations.EMPTY, true, true),
-                            new InternalFilters.InternalBucket("f2k2", k1k2, InternalAggregations.EMPTY, true, true)
+                            new InternalFilters.InternalBucket("f2k1", k1k1, InternalAggregations.EMPTY),
+                            new InternalFilters.InternalBucket("f2k2", k1k2, InternalAggregations.EMPTY)
                         ),
                         true,
                         true,
                         null
                     )
                 )
-            ),
-            true,
-            true
+            )
         ),
         new InternalFilters.InternalBucket(
             "f1k2",
@@ -157,17 +155,15 @@ protected AggregatorReducer getLeaderReducer(AggregationReduceCont
                     new InternalFiltersForF2(
                         "f2",
                         List.of(
-                            new InternalFilters.InternalBucket("f2k1", k2k1, InternalAggregations.EMPTY, true, true),
-                            new InternalFilters.InternalBucket("f2k2", k2k2, InternalAggregations.EMPTY, true, true)
+                            new InternalFilters.InternalBucket("f2k1", k2k1, InternalAggregations.EMPTY),
+                            new InternalFilters.InternalBucket("f2k2", k2k2, InternalAggregations.EMPTY)
                         ),
                        true,
                        true,
                        null
                    )
                )
-            ),
-            true,
-            true
+            )
         )
     ),
     true,
@@ -192,17 +188,15 @@ InternalAggregations reduced(int k1, int k2, int k1k1, int k1k2, int k2k1, int k
                     new InternalFilters(
                         "f2",
                         List.of(
-                            new InternalFilters.InternalBucket("f2k1", k1k1, InternalAggregations.EMPTY, true, true),
-                            new InternalFilters.InternalBucket("f2k2", k1k2, InternalAggregations.EMPTY, true, true)
+                            new InternalFilters.InternalBucket("f2k1", k1k1, InternalAggregations.EMPTY),
+                            new InternalFilters.InternalBucket("f2k2", k1k2, InternalAggregations.EMPTY)
                         ),
                         true,
                         true,
                         null
                     )
                 )
-            ),
-            true,
-            true
+            )
         ),
         new InternalFilters.InternalBucket(
             "f1k2",
@@ -212,17 +206,15 @@ InternalAggregations reduced(int k1, int k2, int k1k1, int k1k2, int k2k1, int k
                     new InternalFilters(
                         "f2",
                         List.of(
-                            new InternalFilters.InternalBucket("f2k1", k2k1, InternalAggregations.EMPTY, true, true),
-                            new InternalFilters.InternalBucket("f2k2", k2k2, InternalAggregations.EMPTY, true, true)
+                            new InternalFilters.InternalBucket("f2k1", k2k1, InternalAggregations.EMPTY),
+                            new InternalFilters.InternalBucket("f2k2", k2k2, InternalAggregations.EMPTY)
                         ),
                         true,
                         true,
                         null
                     )
                 )
-            ),
-            true,
-            true
+            )
         )
     ),
     true,
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java
index 9b6ea7272d0f9..e796cee92c0dc 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java
@@ -28,6 +28,8 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.tests.index.RandomIndexWriter;
 import org.elasticsearch.common.CheckedBiConsumer;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.LongArray;
 import org.elasticsearch.search.aggregations.AggregationExecutionContext;
 import org.elasticsearch.search.aggregations.AggregatorTestCase;
 import org.elasticsearch.search.aggregations.BucketCollector;
@@ -77,7 +79,7 @@ public ScoreMode scoreMode() {
         collector.preCollection();
         indexSearcher.search(termQuery, collector.asCollector());
         collector.postCollection();
-        collector.prepareSelectedBuckets(0);
+        collector.prepareSelectedBuckets(BigArrays.NON_RECYCLING_INSTANCE.newLongArray(1, true));
 
         assertEquals(topDocs.scoreDocs.length, deferredCollectedDocIds.size());
         for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
@@ -91,7 +93,7 @@ public ScoreMode scoreMode() {
         collector.preCollection();
         indexSearcher.search(new MatchAllDocsQuery(), collector.asCollector());
         collector.postCollection();
-        collector.prepareSelectedBuckets(0);
+        collector.prepareSelectedBuckets(BigArrays.NON_RECYCLING_INSTANCE.newLongArray(1, true));
 
         assertEquals(topDocs.scoreDocs.length, deferredCollectedDocIds.size());
         for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
@@ -141,7 +143,7 @@ public void collect(int doc, long owningBucketOrd) throws IOException {
                 }
             }
         }, (deferringCollector, finalCollector) -> {
-            deferringCollector.prepareSelectedBuckets(0, 8, 9);
+            deferringCollector.prepareSelectedBuckets(toLongArray(0, 8, 9));
 
             equalTo(Map.of(0L, List.of(0, 1, 2, 3, 4, 5, 6, 7), 1L, List.of(8), 2L, List.of(9)));
         });
@@ -158,7 +160,7 @@ public void collect(int doc, long owningBucketOrd) throws IOException {
                 }
             }
         }, (deferringCollector, finalCollector) -> {
-            deferringCollector.prepareSelectedBuckets(0, 8, 9);
+            deferringCollector.prepareSelectedBuckets(toLongArray(0, 8, 9));
 
             assertThat(finalCollector.collection, equalTo(Map.of(0L, List.of(4, 5, 6, 7), 1L, List.of(8), 2L, List.of(9))));
         });
@@ -176,12 +178,20 @@ public void collect(int doc, long owningBucketOrd) throws IOException {
                 }
             }
         }, (deferringCollector, finalCollector) -> {
-            deferringCollector.prepareSelectedBuckets(0, 8, 9);
+            deferringCollector.prepareSelectedBuckets(toLongArray(0, 8, 9));
 
             assertThat(finalCollector.collection, equalTo(Map.of(0L, List.of(0, 1, 2, 3), 1L, List.of(8), 2L, List.of(9))));
         });
     }
 
+    private LongArray toLongArray(long... lons) {
+        LongArray longArray = BigArrays.NON_RECYCLING_INSTANCE.newLongArray(lons.length);
+        for (int i = 0; i < lons.length; i++) {
+            longArray.set(i, lons[i]);
+        }
+        return longArray;
+    }
+
     private void testCase(
         BiFunction leafCollector,
         CheckedBiConsumer verify
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregatorTests.java
index 80f27b31ca65b..fb4c62ad66f19 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregatorTests.java
@@ -16,6 +16,7 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.tests.index.RandomIndexWriter;
+import org.elasticsearch.common.util.LongArray;
 import org.elasticsearch.core.Releasables;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.NumberFieldMapper;
@@ -72,7 +73,7 @@ protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCt
         }
 
         @Override
-        public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException {
+        public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) {
             return new InternalAggregation[0];
         }
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java
index 5fb1d0e760afa..7e7ccb1d72e80 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java
@@ -143,18 +143,10 @@ protected InternalComposite createTestInstance(String name, Map<String, Object>
                 continue;
             }
             keys.add(key);
-            InternalComposite.InternalBucket bucket = new InternalComposite.InternalBucket(
-                sourceNames,
-                formats,
-                key,
-                reverseMuls,
-                missingOrders,
-                1L,
-                aggregations
-            );
+            InternalComposite.InternalBucket bucket = new InternalComposite.InternalBucket(sourceNames, formats, key, 1L, aggregations);
             buckets.add(bucket);
         }
-        Collections.sort(buckets, (o1, o2) -> o1.compareKey(o2));
+        Collections.sort(buckets, (o1, o2) -> o1.compareKey(o2, reverseMuls, missingOrders));
         CompositeKey lastBucket = buckets.size() > 0 ? buckets.get(buckets.size() - 1).getRawKey() : null;
         return new InternalComposite(
             name,
@@ -191,8 +183,6 @@ protected InternalComposite mutateInstance(InternalComposite instance) {
                     sourceNames,
                     formats,
                     createCompositeKey(),
-                    reverseMuls,
-                    missingOrders,
                     randomLongBetween(1, 100),
                     InternalAggregations.EMPTY
                 )
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java
index db32d796ea76a..ba186695bcdae 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java
@@ -28,7 +28,6 @@
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.tests.index.RandomIndexWriter;
-import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
 import org.elasticsearch.common.lucene.search.Queries;
@@ -649,13 +648,7 @@ public void testMatchAllOnFilteredIndex() throws IOException {
 
         try (DirectoryReader directoryReader = DirectoryReader.open(directory)) {
             final IndexSettings indexSettings = createIndexSettings();
-            BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() {
-                @Override
-                public void onRemoval(ShardId shardId, Accountable accountable) {}
-
-                @Override
-                public void onCache(ShardId shardId, Accountable accountable) {}
-            });
+            BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP);
             DirectoryReader limitedReader = new DocumentSubsetDirectoryReader(
                 ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(indexSettings.getIndex(), 0)),
                 bitsetFilterCache,
@@ -721,13 +714,7 @@ public void testTermOnFilteredIndex() throws IOException {
 
         try (DirectoryReader directoryReader = DirectoryReader.open(directory)) {
             final IndexSettings indexSettings = createIndexSettings();
-            BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() {
-                @Override
-                public void onRemoval(ShardId shardId, Accountable accountable) {}
-
-                @Override
-                public void onCache(ShardId shardId, Accountable accountable) {}
-            });
+            BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP);
             DirectoryReader limitedReader = new DocumentSubsetDirectoryReader(
                 ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(indexSettings.getIndex(), 0)),
                 bitsetFilterCache,
@@ -790,13 +777,7 @@ public void testTermOnFilterWithMatchAll() throws IOException {
 
         try (DirectoryReader directoryReader = DirectoryReader.open(directory)) {
             final IndexSettings indexSettings = createIndexSettings();
-            BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() {
-                @Override
-                public void onRemoval(ShardId shardId, Accountable accountable) {}
-
-                @Override
-                public void onCache(ShardId shardId, Accountable accountable) {}
-            });
+            BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP);
             DirectoryReader limitedReader = new DocumentSubsetDirectoryReader(
                 ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(indexSettings.getIndex(), 0)),
                 bitsetFilterCache,
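A short sketch, not part of this patch: several hunks above migrate bucket ordinals from a plain long[] to org.elasticsearch.common.util.LongArray (a paged array allocated through BigArrays). The wrap/unwrap pattern below uses only the calls already visible in the diff (BigArrays#newLongArray, LongArray#set/get/size); NON_RECYCLING_INSTANCE is the simple BigArrays instance the tests above use.

    import org.elasticsearch.common.util.BigArrays;
    import org.elasticsearch.common.util.LongArray;

    // Sketch of the long[] -> LongArray migration pattern used in the hunks above.
    final class LongArraySketch {
        static LongArray wrap(long... values) {
            LongArray array = BigArrays.NON_RECYCLING_INSTANCE.newLongArray(values.length);
            for (int i = 0; i < values.length; i++) {
                array.set(i, values[i]);
            }
            return array;
        }

        static long[] unwrap(LongArray array) {
            long[] values = new long[Math.toIntExact(array.size())];
            for (int i = 0; i < values.length; i++) {
                values[i] = array.get(i);
            }
            return values;
        }
    }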
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFiltersTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFiltersTests.java
index c300bfed5f62a..ad2543548dcae 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFiltersTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFiltersTests.java
@@ -59,10 +59,9 @@ public void setUp() throws Exception {
     @Override
     protected InternalFilters createTestInstance(String name, Map<String, Object> metadata, InternalAggregations aggregations) {
         final List<InternalFilters.InternalBucket> buckets = new ArrayList<>();
-        for (int i = 0; i < keys.size(); ++i) {
-            String key = keys.get(i);
+        for (String key : keys) {
             int docCount = randomIntBetween(0, 1000);
-            buckets.add(new InternalFilters.InternalBucket(key, docCount, aggregations, keyed, keyedBucket));
+            buckets.add(new InternalBucket(key, docCount, aggregations));
         }
         return new InternalFilters(name, buckets, keyed, keyedBucket, metadata);
     }
@@ -94,7 +93,7 @@ protected InternalFilters mutateInstance(InternalFilters instance) {
             case 0 -> name += randomAlphaOfLength(5);
             case 1 -> {
                 buckets = new ArrayList<>(buckets);
-                buckets.add(new InternalBucket("test", randomIntBetween(0, 1000), InternalAggregations.EMPTY, keyed, keyedBucket));
+                buckets.add(new InternalBucket("test", randomIntBetween(0, 1000), InternalAggregations.EMPTY));
             }
             default -> {
                 if (metadata == null) {
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java
index 9e6829139d772..5eb1500e37269 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java
@@ -106,7 +106,7 @@ private InternalDateHistogram createTestInstance(
             // rarely leave some holes to be filled up with empty buckets in case minDocCount is set to 0
             if (frequently()) {
                 long key = startingDate + intervalMillis * i;
-                buckets.add(new InternalDateHistogram.Bucket(key, randomIntBetween(1, 100), keyed, format, aggregations));
+                buckets.add(new InternalDateHistogram.Bucket(key, randomIntBetween(1, 100), format, aggregations));
             }
         }
         BucketOrder order = BucketOrder.key(randomBoolean());
@@ -181,13 +181,7 @@ protected InternalDateHistogram mutateInstance(InternalDateHistogram instance) {
             case 1 -> {
                 buckets = new ArrayList<>(buckets);
                 buckets.add(
-                    new InternalDateHistogram.Bucket(
-                        randomNonNegativeLong(),
-                        randomIntBetween(1, 100),
-                        keyed,
-                        format,
-                        InternalAggregations.EMPTY
-                    )
+                    new InternalDateHistogram.Bucket(randomNonNegativeLong(), randomIntBetween(1, 100), format, InternalAggregations.EMPTY)
                 );
             }
             case 2 -> order = BucketOrder.count(randomBoolean());
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogramTests.java
index db93bc5dfe179..f97a836712e36 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogramTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogramTests.java
@@ -74,7 +74,7 @@ protected InternalHistogram createTestInstance(String name, Map<String, Object>
             // rarely leave some holes to be filled up with empty buckets in case minDocCount is set to 0
             if (frequently()) {
                 final int docCount = TestUtil.nextInt(random(), 1, 50);
-                buckets.add(new InternalHistogram.Bucket(base + i * interval, docCount, keyed, format, aggregations));
+                buckets.add(new InternalHistogram.Bucket(base + i * interval, docCount, format, aggregations));
             }
         }
         BucketOrder order = BucketOrder.key(randomBoolean());
@@ -96,7 +96,7 @@ public void testHandlesNaN() {
             newBuckets.addAll(buckets.subList(0, buckets.size() - 1));
         }
         InternalHistogram.Bucket b = buckets.get(buckets.size() - 1);
-        newBuckets.add(new InternalHistogram.Bucket(Double.NaN, b.docCount, keyed, b.format, b.aggregations));
+        newBuckets.add(new InternalHistogram.Bucket(Double.NaN, b.docCount, b.format, b.aggregations));
 
         List<InternalAggregation> reduceMe = List.of(histogram, histogram2);
         InternalAggregationTestCase.reduce(reduceMe, mockReduceContext(mockBuilder(reduceMe)).forPartialReduction());
@@ -171,13 +171,7 @@ protected InternalHistogram mutateInstance(InternalHistogram instance) {
             case 1 -> {
                 buckets = new ArrayList<>(buckets);
                 buckets.add(
-                    new InternalHistogram.Bucket(
-                        randomNonNegativeLong(),
-                        randomIntBetween(1, 100),
-                        keyed,
-                        format,
-                        InternalAggregations.EMPTY
-                    )
+                    new InternalHistogram.Bucket(randomNonNegativeLong(), randomIntBetween(1, 100), format, InternalAggregations.EMPTY)
                 );
             }
             case 2 -> order = BucketOrder.count(randomBoolean());
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefixTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefixTests.java
index 5ca78f322491b..dc5b57619676e 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefixTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefixTests.java
@@ -75,16 +75,7 @@ private InternalIpPrefix createTestInstance(
             BytesRef key = itr.next();
             boolean v6 = InetAddressPoint.decode(key.bytes) instanceof Inet6Address;
             buckets.add(
-                new InternalIpPrefix.Bucket(
-                    DocValueFormat.IP,
-                    key,
-                    keyed,
-                    v6,
-                    prefixLength,
-                    appendPrefixLength,
-                    randomLongBetween(0, Long.MAX_VALUE),
-                    aggregations
-                )
+                new InternalIpPrefix.Bucket(key, v6, prefixLength, appendPrefixLength, randomLongBetween(0, Long.MAX_VALUE), aggregations)
             );
         }
 
@@ -126,7 +117,6 @@ protected void assertReduced(InternalIpPrefix reduced, List<InternalIpPrefix> in
         Map expectedCounts = new HashMap<>();
         for (InternalIpPrefix i : inputs) {
             for (InternalIpPrefix.Bucket b : i.getBuckets()) {
-                assertThat(b.getFormat(), equalTo(DocValueFormat.IP));
                 long acc = expectedCounts.getOrDefault(b.getKey(), 0L);
                 acc += b.getDocCount();
                 expectedCounts.put(b.getKey(), acc);
@@ -146,20 +136,16 @@ public void testPartialReduceNoMinDocCount() {
         InternalIpPrefix.Bucket b1 = new InternalIpPrefix.Bucket(
-            DocValueFormat.IP,
             new BytesRef(InetAddressPoint.encode(InetAddresses.forString("192.168.0.1"))),
             false,
-            false,
             1,
             false,
             1,
             InternalAggregations.EMPTY
         );
         InternalIpPrefix.Bucket b2 = new InternalIpPrefix.Bucket(
-            DocValueFormat.IP,
             new BytesRef(InetAddressPoint.encode(InetAddresses.forString("200.0.0.1"))),
             false,
-            false,
             1,
             false,
             2,
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRangeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRangeTests.java
index b888e61e1bbf9..383065193c4d5 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRangeTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRangeTests.java
@@ -72,7 +72,7 @@ protected InternalBinaryRange createTestInstance(
         for (int i = 0; i < ranges.size(); ++i) {
             final int docCount = randomIntBetween(1, 100);
             final String key = (i == nullKey) ? null : randomAlphaOfLength(10);
-            buckets.add(new InternalBinaryRange.Bucket(format, keyed, key, ranges.get(i).v1(), ranges.get(i).v2(), docCount, aggregations));
+            buckets.add(new InternalBinaryRange.Bucket(format, key, ranges.get(i).v1(), ranges.get(i).v2(), docCount, aggregations));
         }
         return new InternalBinaryRange(name, format, keyed, buckets, metadata);
     }
@@ -113,7 +113,6 @@ protected InternalBinaryRange mutateInstance(InternalBinaryRange instance) {
                 buckets.add(
                     new InternalBinaryRange.Bucket(
                         format,
-                        keyed,
                         "range_a",
                         new BytesRef(randomAlphaOfLengthBetween(1, 20)),
                         new BytesRef(randomAlphaOfLengthBetween(1, 20)),
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRangeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRangeTests.java
index 255ad7c4417b3..fdfffaf8fb8e7 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRangeTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRangeTests.java
@@ -81,7 +81,7 @@ protected InternalDateRange createTestInstance(
             int docCount = randomIntBetween(0, 1000);
             double from = range.v1();
             double to = range.v2();
-            buckets.add(new InternalDateRange.Bucket("range_" + i, from, to, docCount, aggregations, keyed, format));
+            buckets.add(new InternalDateRange.Bucket("range_" + i, from, to, docCount, aggregations, format));
         }
         return new InternalDateRange(name, buckets, format, keyed, metadata);
     }
@@ -105,9 +105,7 @@ protected InternalDateRange mutateInstance(InternalDateRange instance) {
                 buckets = new ArrayList<>(buckets);
                 double from = randomDouble();
                 double to = from + randomDouble();
-                buckets.add(
-                    new InternalDateRange.Bucket("range_a", from, to, randomNonNegativeLong(), InternalAggregations.EMPTY, false, format)
-                );
+                buckets.add(new InternalDateRange.Bucket("range_a", from, to, randomNonNegativeLong(), InternalAggregations.EMPTY, format));
             }
             case 3 -> {
                 if (metadata == null) {
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/InternalGeoDistanceTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/InternalGeoDistanceTests.java
index 49144ec2f40fb..dcb41322a9426 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/InternalGeoDistanceTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/InternalGeoDistanceTests.java
@@ -63,7 +63,7 @@ protected InternalGeoDistance createTestInstance(
             int docCount = randomIntBetween(0, 1000);
             double from = range.v1();
             double to = range.v2();
-            buckets.add(new InternalGeoDistance.Bucket("range_" + i, from, to, docCount, aggregations, keyed));
+            buckets.add(new InternalGeoDistance.Bucket("range_" + i, from, to, docCount, aggregations));
         }
         return new InternalGeoDistance(name, buckets, keyed, metadata);
     }
@@ -86,9 +86,7 @@ protected InternalGeoDistance mutateInstance(InternalGeoDistance instance) {
                 buckets = new ArrayList<>(buckets);
                 double from = randomDouble();
                 double to = from + randomDouble();
-                buckets.add(
-                    new InternalGeoDistance.Bucket("range_a", from, to, randomNonNegativeLong(), InternalAggregations.EMPTY, false)
-                );
+                buckets.add(new InternalGeoDistance.Bucket("range_a", from, to, randomNonNegativeLong(), InternalAggregations.EMPTY));
             }
             case 3 -> {
                 if (metadata == null) {
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/InternalRangeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/InternalRangeTests.java
index da0fbd94d6ed6..0d957255b6416 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/InternalRangeTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/range/InternalRangeTests.java
@@ -76,7 +76,7 @@ public void setUp() throws Exception {
             int docCount = randomIntBetween(0, 1000);
             double from = range.v1();
             double to = range.v2();
-            buckets.add(new InternalRange.Bucket("range_" + i, from, to, docCount, aggregations, keyed, format));
+            buckets.add(new InternalRange.Bucket("range_" + i, from, to, docCount, aggregations, format));
         }
         return new InternalRange<>(name, buckets, format, keyed, metadata);
     }
@@ -100,9 +100,7 @@ protected Class interna
                 buckets = new ArrayList<>(buckets);
                 double from = randomDouble();
                 double to = from + randomDouble();
-                buckets.add(
-                    new InternalRange.Bucket("range_a", from, to, randomNonNegativeLong(), InternalAggregations.EMPTY, false, format)
-                );
+                buckets.add(new InternalRange.Bucket("range_a", from, to, randomNonNegativeLong(), InternalAggregations.EMPTY, format));
             }
             case 3 -> {
                 if (metadata == null) {
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollectorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollectorTests.java
index 2df6a0cfb91ca..a0a24e98ae721 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollectorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollectorTests.java
@@ -22,6 +22,7 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.tests.index.RandomIndexWriter;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.MockBigArrays;
 import org.elasticsearch.common.util.MockPageCacheRecycler;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
@@ -68,7 +69,7 @@ public void testReplay() throws Exception {
         collector.preCollection();
         indexSearcher.search(termQuery, collector.asCollector());
         collector.postCollection();
-        collector.prepareSelectedBuckets(0);
+        collector.prepareSelectedBuckets(BigArrays.NON_RECYCLING_INSTANCE.newLongArray(1, true));
 
         assertEquals(topDocs.scoreDocs.length, deferredCollectedDocIds.size());
         for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorTests.java
index f75f9f474c8e8..2f51a5a09a8ac 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregatorTests.java
@@ -11,22 +11,29 @@
 
 import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.document.SortedNumericDocValuesField;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.tests.index.RandomIndexWriter;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.index.mapper.KeywordFieldMapper;
 import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.aggregations.AggregationBuilders;
 import org.elasticsearch.search.aggregations.AggregatorTestCase;
 import org.elasticsearch.search.aggregations.bucket.filter.Filter;
 import org.elasticsearch.search.aggregations.metrics.Avg;
 import org.elasticsearch.search.aggregations.metrics.Max;
 import org.elasticsearch.search.aggregations.metrics.Min;
+import org.elasticsearch.search.aggregations.metrics.TopHits;
 import org.hamcrest.Description;
 import org.hamcrest.Matcher;
 import org.hamcrest.TypeSafeMatcher;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.DoubleStream;
@@ -37,6 +44,8 @@
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.lessThan;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.notANumber;
@@ -76,6 +85,35 @@ public void testAggregationSampling() throws IOException {
         assertThat(avgAvg, closeTo(1.5, 0.5));
     }
 
+    public void testAggregationSampling_withScores() throws IOException {
+        long[] counts = new long[5];
+        AtomicInteger integer = new AtomicInteger();
+        do {
+            testCase(RandomSamplerAggregatorTests::writeTestDocs, (InternalRandomSampler result) -> {
+                counts[integer.get()] = result.getDocCount();
+                if (result.getDocCount() > 0) {
+                    TopHits agg = result.getAggregations().get("top");
+                    List<SearchHit> hits = Arrays.asList(agg.getHits().getHits());
+                    assertThat(Strings.toString(result), hits, hasSize(1));
+                    assertThat(Strings.toString(result), hits.get(0).getScore(), allOf(greaterThan(0.0f), lessThan(1.0f)));
+                }
+            },
+                new AggTestConfig(
+                    new RandomSamplerAggregationBuilder("my_agg").subAggregation(AggregationBuilders.topHits("top").size(1))
+                        .setProbability(0.25),
+                    longField(NUMERIC_FIELD_NAME)
+                ).withQuery(
+                    new BooleanQuery.Builder().add(
+                        new TermQuery(new Term(KEYWORD_FIELD_NAME, KEYWORD_FIELD_VALUE)),
+                        BooleanClause.Occur.SHOULD
+                    ).build()
+                )
+            );
+        } while (integer.incrementAndGet() < 5);
+        long avgCount = LongStream.of(counts).sum() / integer.get();
+        assertThat(avgCount, allOf(greaterThanOrEqualTo(20L), lessThanOrEqualTo(70L)));
+    }
+
     public void testAggregationSamplingNestedAggsScaled() throws IOException {
         // in case 0 docs get sampled, which can rarely happen
         // in case the test index has many segments.
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpersTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpersTests.java
index 9f667b3efcb61..626adc9a7c41c 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpersTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketHelpersTests.java
@@ -9,7 +9,6 @@
 
 package org.elasticsearch.search.aggregations.pipeline;
 
-import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
 import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
@@ -56,10 +55,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         };
 
         InternalMultiBucketAggregation.InternalBucket bucket = new InternalMultiBucketAggregation.InternalBucket() {
-            @Override
-            public void writeTo(StreamOutput out) throws IOException {
-
-            }
 
             @Override
             public Object getKey() {
@@ -81,11 +76,6 @@ public InternalAggregations getAggregations() {
                 return null;
             }
 
-            @Override
-            public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-                return null;
-            }
-
             @Override
             public Object getProperty(String containingAggName, List<String> path) {
                 return new Object[0];
@@ -136,10 +126,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         };
 
         InternalMultiBucketAggregation.InternalBucket bucket = new InternalMultiBucketAggregation.InternalBucket() {
-            @Override
-            public void writeTo(StreamOutput out) throws IOException {
-
-            }
 
             @Override
             public Object getKey() {
@@ -161,11 +147,6 @@ public InternalAggregations getAggregations() {
                 return null;
             }
 
-            @Override
-            public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-                return null;
-            }
-
             @Override
             public Object getProperty(String containingAggName, List<String> path) {
                 return mock(InternalTDigestPercentiles.class);
diff --git a/server/src/test/java/org/elasticsearch/search/fetch/FetchPhaseDocsIteratorTests.java b/server/src/test/java/org/elasticsearch/search/fetch/FetchPhaseDocsIteratorTests.java
index c699e117ffbf4..d5e930321db95 100644
--- a/server/src/test/java/org/elasticsearch/search/fetch/FetchPhaseDocsIteratorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/fetch/FetchPhaseDocsIteratorTests.java
@@ -77,7 +77,7 @@ protected SearchHit nextDoc(int doc) {
             }
         };
 
-        SearchHit[] hits = it.iterate(null, reader, docs);
+        SearchHit[] hits = it.iterate(null, reader, docs, randomBoolean());
 
         assertThat(hits.length, equalTo(docs.length));
         for (int i = 0; i < hits.length; i++) {
@@ -125,7 +125,7 @@ protected SearchHit nextDoc(int doc) {
             }
         };
 
-        Exception e = expectThrows(FetchPhaseExecutionException.class, () -> it.iterate(null, reader, docs));
+        Exception e = expectThrows(FetchPhaseExecutionException.class, () -> it.iterate(null, reader, docs, randomBoolean()));
         assertThat(e.getMessage(), containsString("Error running fetch phase for doc [" + badDoc + "]"));
         assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java
index 3699cdee3912b..d1bbc1ec5910b 100644
--- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java
@@ -607,17 +607,6 @@ public void testOrderSerialization() throws Exception {
         }
     }
 
-    public void testForceSourceDeprecation() throws IOException {
-        String highlightJson = """
-            { "fields" : { }, "force_source" : true }
-            """;
-        try (XContentParser parser = createParser(JsonXContent.jsonXContent, highlightJson)) {
-            HighlightBuilder.fromXContent(parser);
-        }
-
-        assertWarnings("Deprecated field [force_source] used, this field is unused and will be removed entirely");
-    }
-
     protected static XContentBuilder toXContent(HighlightBuilder highlight, XContentType contentType) throws IOException {
         XContentBuilder builder = XContentFactory.contentBuilder(contentType);
         if (randomBoolean()) {
diff --git a/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java b/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java
index 9957d8c92b955..fe07cbf8efdfd 100644
--- a/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java
+++ b/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java
@@ -53,7 +53,6 @@
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.tests.index.RandomIndexWriter;
-import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.BitSet;
 import org.apache.lucene.util.BitSetIterator;
 import org.apache.lucene.util.Bits;
@@ -308,19 +307,8 @@ public void doTestContextIndexSearcher(boolean sparse, boolean deletions) throws
         w.deleteDocuments(new Term("delete", "yes"));
 
         IndexSettings settings = IndexSettingsModule.newIndexSettings("_index", Settings.EMPTY);
-        BitsetFilterCache.Listener listener = new BitsetFilterCache.Listener() {
-            @Override
-            public void onCache(ShardId shardId, Accountable accountable) {
-
-            }
-
-            @Override
-            public void onRemoval(ShardId shardId, Accountable accountable) {
-
-            }
-        };
         DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(w), new ShardId(settings.getIndex(), 0));
-        BitsetFilterCache cache = new BitsetFilterCache(settings, listener);
+        BitsetFilterCache cache = new BitsetFilterCache(settings, BitsetFilterCache.Listener.NOOP);
         Query roleQuery = new TermQuery(new Term("allowed", "yes"));
         BitSet bitSet = cache.getBitSetProducer(roleQuery).getBitSet(reader.leaves().get(0));
         if (sparse) {
diff --git a/server/src/test/java/org/elasticsearch/search/rank/RankFeatureShardPhaseTests.java b/server/src/test/java/org/elasticsearch/search/rank/RankFeatureShardPhaseTests.java
index 6250d1679fda3..41febe77d54aa 100644
--- a/server/src/test/java/org/elasticsearch/search/rank/RankFeatureShardPhaseTests.java
+++ b/server/src/test/java/org/elasticsearch/search/rank/RankFeatureShardPhaseTests.java
@@ -219,8 +219,7 @@ public void testPrepareForFetch() {
             RankFeatureShardRequest request = mock(RankFeatureShardRequest.class);
             when(request.getDocIds()).thenReturn(new int[] { 4, 9, numDocs - 1 });
 
-            RankFeatureShardPhase rankFeatureShardPhase = new RankFeatureShardPhase();
-            rankFeatureShardPhase.prepareForFetch(searchContext, request);
+            RankFeatureShardPhase.prepareForFetch(searchContext, request);
 
             assertNotNull(searchContext.fetchFieldsContext());
             assertEquals(searchContext.fetchFieldsContext().fields().size(), 1);
@@ -248,8 +247,7 @@ public void
testPrepareForFetchNoRankFeatureContext() { RankFeatureShardRequest request = mock(RankFeatureShardRequest.class); when(request.getDocIds()).thenReturn(new int[] { 4, 9, numDocs - 1 }); - RankFeatureShardPhase rankFeatureShardPhase = new RankFeatureShardPhase(); - rankFeatureShardPhase.prepareForFetch(searchContext, request); + RankFeatureShardPhase.prepareForFetch(searchContext, request); assertNull(searchContext.fetchFieldsContext()); assertNull(searchContext.fetchResult()); @@ -274,8 +272,7 @@ public void testPrepareForFetchWhileTaskIsCancelled() { RankFeatureShardRequest request = mock(RankFeatureShardRequest.class); when(request.getDocIds()).thenReturn(new int[] { 4, 9, numDocs - 1 }); - RankFeatureShardPhase rankFeatureShardPhase = new RankFeatureShardPhase(); - expectThrows(TaskCancelledException.class, () -> rankFeatureShardPhase.prepareForFetch(searchContext, request)); + expectThrows(TaskCancelledException.class, () -> RankFeatureShardPhase.prepareForFetch(searchContext, request)); } } @@ -318,11 +315,10 @@ public void testProcessFetch() { RankFeatureShardRequest request = mock(RankFeatureShardRequest.class); when(request.getDocIds()).thenReturn(new int[] { 4, 9, numDocs - 1 }); - RankFeatureShardPhase rankFeatureShardPhase = new RankFeatureShardPhase(); // this is called as part of the search context initialization // with the ResultsType.RANK_FEATURE type searchContext.addRankFeatureResult(); - rankFeatureShardPhase.processFetch(searchContext); + RankFeatureShardPhase.processFetch(searchContext); assertNotNull(searchContext.rankFeatureResult()); assertNotNull(searchContext.rankFeatureResult().rankFeatureResult()); @@ -365,11 +361,10 @@ public void testProcessFetchEmptyHits() { RankFeatureShardRequest request = mock(RankFeatureShardRequest.class); when(request.getDocIds()).thenReturn(new int[] { 4, 9, numDocs - 1 }); - RankFeatureShardPhase rankFeatureShardPhase = new RankFeatureShardPhase(); // this is called as part of the search context initialization // with the ResultsType.RANK_FEATURE type searchContext.addRankFeatureResult(); - rankFeatureShardPhase.processFetch(searchContext); + RankFeatureShardPhase.processFetch(searchContext); assertNotNull(searchContext.rankFeatureResult()); assertNotNull(searchContext.rankFeatureResult().rankFeatureResult()); @@ -410,11 +405,10 @@ public void testProcessFetchWhileTaskIsCancelled() { RankFeatureShardRequest request = mock(RankFeatureShardRequest.class); when(request.getDocIds()).thenReturn(new int[] { 4, 9, numDocs - 1 }); - RankFeatureShardPhase rankFeatureShardPhase = new RankFeatureShardPhase(); // this is called as part of the search context initialization // with the ResultsType.RANK_FEATURE type searchContext.addRankFeatureResult(); - expectThrows(TaskCancelledException.class, () -> rankFeatureShardPhase.processFetch(searchContext)); + expectThrows(TaskCancelledException.class, () -> RankFeatureShardPhase.processFetch(searchContext)); } finally { if (searchHits != null) { searchHits.decRef(); diff --git a/server/src/test/java/org/elasticsearch/search/scroll/RestClearScrollActionTests.java b/server/src/test/java/org/elasticsearch/search/scroll/RestClearScrollActionTests.java index 33978b4cd6b9f..d91b4430e4f94 100644 --- a/server/src/test/java/org/elasticsearch/search/scroll/RestClearScrollActionTests.java +++ b/server/src/test/java/org/elasticsearch/search/scroll/RestClearScrollActionTests.java @@ -54,7 +54,7 @@ public void clearScroll(ClearScrollRequest request, ActionListener newCredentialsConsumer; + private final String 
webIdentityToken; + + public AwsStsHttpFixture(BiConsumer newCredentialsConsumer, String webIdentityToken) { + this.newCredentialsConsumer = Objects.requireNonNull(newCredentialsConsumer); + this.webIdentityToken = Objects.requireNonNull(webIdentityToken); + } + + protected HttpHandler createHandler() { + return new AwsStsHttpHandler(newCredentialsConsumer, webIdentityToken); + } + + public String getAddress() { + return "http://" + server.getAddress().getHostString() + ":" + server.getAddress().getPort(); + } + + public void stop(int delay) { + server.stop(delay); + } + + protected void before() throws Throwable { + server = HttpServer.create(resolveAddress(), 0); + server.createContext("/", Objects.requireNonNull(createHandler())); + server.start(); + } + + @Override + protected void after() { + stop(0); + } + + private static InetSocketAddress resolveAddress() { + try { + return new InetSocketAddress(InetAddress.getByName("localhost"), 0); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + } +} diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSTS.java b/test/fixtures/aws-sts-fixture/src/main/java/fixture/aws/sts/AwsStsHttpHandler.java similarity index 66% rename from test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSTS.java rename to test/fixtures/aws-sts-fixture/src/main/java/fixture/aws/sts/AwsStsHttpHandler.java index 54e0be1e321a2..ac3299f157485 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSTS.java +++ b/test/fixtures/aws-sts-fixture/src/main/java/fixture/aws/sts/AwsStsHttpHandler.java @@ -6,12 +6,16 @@ * your election, the "Elastic License 2.0", the "GNU Affero General Public * License v3.0 only", or the "Server Side Public License, v 1". 
*/ -package fixture.s3; +package fixture.aws.sts; +import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.rest.RestStatus; +import java.io.IOException; import java.net.URLDecoder; import java.nio.charset.StandardCharsets; import java.time.ZonedDateTime; @@ -19,53 +23,40 @@ import java.util.Arrays; import java.util.Locale; import java.util.Map; +import java.util.Objects; +import java.util.function.BiConsumer; import java.util.stream.Collectors; -public class S3HttpFixtureWithSTS extends S3HttpFixture { +import static org.elasticsearch.test.ESTestCase.randomIdentifier; +import static org.elasticsearch.test.ESTestCase.randomSecretKey; - private static final String ROLE_ARN = "arn:aws:iam::123456789012:role/FederatedWebIdentityRole"; - private static final String ROLE_NAME = "sts-fixture-test"; - private final String sessionToken; - private final String webIdentityToken; +/** + * Minimal HTTP handler that emulates the AWS STS server + */ +@SuppressForbidden(reason = "this test uses a HttpServer to emulate the AWS STS endpoint") +public class AwsStsHttpHandler implements HttpHandler { - public S3HttpFixtureWithSTS() { - this(true); - } + static final String ROLE_ARN = "arn:aws:iam::123456789012:role/FederatedWebIdentityRole"; + static final String ROLE_NAME = "sts-fixture-test"; - public S3HttpFixtureWithSTS(boolean enabled) { - this( - enabled, - "sts_bucket", - "sts_base_path", - "sts_access_key", - "sts_session_token", - "Atza|IQEBLjAsAhRFiXuWpUXuRvQ9PZL3GMFcYevydwIUFAHZwXZXXXXXXXXJnrulxKDHwy87oGKPznh0D6bEQZTSCzyoCtL_8S07pLpr0zMbn6w1lfVZKNTBdDansFBmtGnIsIapjI6xKR02Yc_2bQ8LZbUXSGm6Ry6_BG7PrtLZtj_dfCTj92xNGed-CrKqjG7nPBjNIL016GGvuS5gSvPRUxWES3VYfm1wl7WTI7jn-Pcb6M-buCgHhFOzTQxod27L9CqnOLio7N3gZAGpsp6n1-AJBOCJckcyXe2c6uD0srOJeZlKUm2eTDVMf8IehDVI0r1QOnTV6KzzAI3OY87Vd_cVMQ" - ); - } + private final BiConsumer newCredentialsConsumer; + private final String webIdentityToken; - public S3HttpFixtureWithSTS( - boolean enabled, - String bucket, - String basePath, - String accessKey, - String sessionToken, - String webIdentityToken - ) { - super(enabled, bucket, basePath, accessKey); - this.sessionToken = sessionToken; - this.webIdentityToken = webIdentityToken; + public AwsStsHttpHandler(BiConsumer newCredentialsConsumer, String webIdentityToken) { + this.newCredentialsConsumer = Objects.requireNonNull(newCredentialsConsumer); + this.webIdentityToken = Objects.requireNonNull(webIdentityToken); } @Override - protected HttpHandler createHandler() { - final HttpHandler delegate = super.createHandler(); + public void handle(final HttpExchange exchange) throws IOException { + // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html + + try (exchange) { + final var requestMethod = exchange.getRequestMethod(); + final var path = exchange.getRequestURI().getPath(); + + if ("POST".equals(requestMethod) && "/assume-role-with-web-identity/".equals(path)) { - return exchange -> { - // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html - // It's run as a separate service, but we emulate it under the `assume-role-with-web-identity` endpoint - // of the S3 serve for the simplicity sake - if ("POST".equals(exchange.getRequestMethod()) - && exchange.getRequestURI().getPath().startsWith("/assume-role-with-web-identity")) { String body = new String(exchange.getRequestBody().readAllBytes(), 
StandardCharsets.UTF_8); Map params = Arrays.stream(body.split("&")) .map(e -> e.split("=")) @@ -82,6 +73,9 @@ protected HttpHandler createHandler() { exchange.close(); return; } + final var accessKey = randomIdentifier(); + final var sessionToken = randomIdentifier(); + newCredentialsConsumer.accept(accessKey, sessionToken); final byte[] response = String.format( Locale.ROOT, """ @@ -95,7 +89,7 @@ protected HttpHandler createHandler() { %s - secret_access_key + %s %s %s @@ -109,6 +103,7 @@ protected HttpHandler createHandler() { ROLE_ARN, ROLE_NAME, sessionToken, + randomSecretKey(), ZonedDateTime.now().plusDays(1L).format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ssZ")), accessKey ).getBytes(StandardCharsets.UTF_8); @@ -118,7 +113,8 @@ protected HttpHandler createHandler() { exchange.close(); return; } - delegate.handle(exchange); - }; + + ExceptionsHelper.maybeDieOnAnotherThread(new AssertionError("not supported: " + requestMethod + " " + path)); + } } } diff --git a/test/fixtures/aws-sts-fixture/src/test/java/fixture/aws/sts/AwsStsHttpHandlerTests.java b/test/fixtures/aws-sts-fixture/src/test/java/fixture/aws/sts/AwsStsHttpHandlerTests.java new file mode 100644 index 0000000000000..4094ce18e7aef --- /dev/null +++ b/test/fixtures/aws-sts-fixture/src/test/java/fixture/aws/sts/AwsStsHttpHandlerTests.java @@ -0,0 +1,268 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package fixture.aws.sts; + +import com.sun.net.httpserver.Headers; +import com.sun.net.httpserver.HttpContext; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpPrincipal; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.InetSocketAddress; +import java.net.URI; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.containsString; + +public class AwsStsHttpHandlerTests extends ESTestCase { + + public void testGenerateCredentials() { + final Map<String, String> generatedCredentials = new HashMap<>(); + + final var webIdentityToken = randomUnicodeOfLength(10); + final var handler = new AwsStsHttpHandler(generatedCredentials::put, webIdentityToken); + + final var response = handleRequest( + handler, + Map.of( + "Action", + "AssumeRoleWithWebIdentity", + "RoleSessionName", + AwsStsHttpHandler.ROLE_NAME, + "RoleArn", + AwsStsHttpHandler.ROLE_ARN, + "WebIdentityToken", + webIdentityToken + ) + ); + assertEquals(RestStatus.OK, response.status()); + + assertThat(generatedCredentials, aMapWithSize(1)); + final var accessKey = generatedCredentials.keySet().iterator().next(); + final var sessionToken = generatedCredentials.values().iterator().next(); + + final var responseBody = response.body().utf8ToString(); + assertThat(responseBody, containsString("<AccessKeyId>" + accessKey + "</AccessKeyId>")); + assertThat(responseBody, containsString("<SessionToken>" + sessionToken + "</SessionToken>")); + } + + public void testInvalidAction() { + final var handler = new AwsStsHttpHandler((key, token) -> fail(), randomUnicodeOfLength(10)); + final var response = handleRequest(handler, Map.of("Action", "Unsupported")); + assertEquals(RestStatus.BAD_REQUEST, response.status()); + } + + public void testInvalidRole() { + final var webIdentityToken = randomUnicodeOfLength(10); + final var handler = new AwsStsHttpHandler((key, token) -> fail(), webIdentityToken); + final var response = handleRequest( + handler, + Map.of( + "Action", + "AssumeRoleWithWebIdentity", + "RoleSessionName", + randomValueOtherThan(AwsStsHttpHandler.ROLE_NAME, ESTestCase::randomIdentifier), + "RoleArn", + AwsStsHttpHandler.ROLE_ARN, + "WebIdentityToken", + webIdentityToken + ) + ); + assertEquals(RestStatus.UNAUTHORIZED, response.status()); + } + + public void testInvalidToken() { + final var webIdentityToken = randomUnicodeOfLength(10); + final var handler = new AwsStsHttpHandler((key, token) -> fail(), webIdentityToken); + final var response = handleRequest( + handler, + Map.of( + "Action", + "AssumeRoleWithWebIdentity", + "RoleSessionName", + AwsStsHttpHandler.ROLE_NAME, + "RoleArn", + AwsStsHttpHandler.ROLE_ARN, + "WebIdentityToken", + randomValueOtherThan(webIdentityToken, () -> randomUnicodeOfLength(10)) + ) + ); + assertEquals(RestStatus.UNAUTHORIZED, response.status()); + } + + public void testInvalidARN() { + final var webIdentityToken = randomUnicodeOfLength(10); + final var handler = new AwsStsHttpHandler((key, token) -> fail(), webIdentityToken); + final var response = handleRequest( + handler, + Map.of( + "Action", + "AssumeRoleWithWebIdentity", +
"RoleSessionName", + AwsStsHttpHandler.ROLE_NAME, + "RoleArn", + randomValueOtherThan(AwsStsHttpHandler.ROLE_ARN, ESTestCase::randomIdentifier), + "WebIdentityToken", + webIdentityToken + ) + ); + assertEquals(RestStatus.UNAUTHORIZED, response.status()); + } + + private record TestHttpResponse(RestStatus status, BytesReference body) {} + + private static TestHttpResponse handleRequest(AwsStsHttpHandler handler, Map body) { + final var httpExchange = new TestHttpExchange( + "POST", + "/assume-role-with-web-identity/", + new BytesArray( + body.entrySet() + .stream() + .map(e -> e.getKey() + "=" + URLEncoder.encode(e.getValue(), StandardCharsets.UTF_8)) + .collect(Collectors.joining("&")) + ), + TestHttpExchange.EMPTY_HEADERS + ); + try { + handler.handle(httpExchange); + } catch (IOException e) { + fail(e); + } + assertNotEquals(0, httpExchange.getResponseCode()); + return new TestHttpResponse(RestStatus.fromCode(httpExchange.getResponseCode()), httpExchange.getResponseBodyContents()); + } + + private static class TestHttpExchange extends HttpExchange { + + private static final Headers EMPTY_HEADERS = new Headers(); + + private final String method; + private final URI uri; + private final BytesReference requestBody; + private final Headers requestHeaders; + + private final Headers responseHeaders = new Headers(); + private final BytesStreamOutput responseBody = new BytesStreamOutput(); + private int responseCode; + + TestHttpExchange(String method, String uri, BytesReference requestBody, Headers requestHeaders) { + this.method = method; + this.uri = URI.create(uri); + this.requestBody = requestBody; + this.requestHeaders = requestHeaders; + } + + @Override + public Headers getRequestHeaders() { + return requestHeaders; + } + + @Override + public Headers getResponseHeaders() { + return responseHeaders; + } + + @Override + public URI getRequestURI() { + return uri; + } + + @Override + public String getRequestMethod() { + return method; + } + + @Override + public HttpContext getHttpContext() { + return null; + } + + @Override + public void close() {} + + @Override + public InputStream getRequestBody() { + try { + return requestBody.streamInput(); + } catch (IOException e) { + throw new AssertionError(e); + } + } + + @Override + public OutputStream getResponseBody() { + return responseBody; + } + + @Override + public void sendResponseHeaders(int rCode, long responseLength) { + this.responseCode = rCode; + } + + @Override + public InetSocketAddress getRemoteAddress() { + return null; + } + + @Override + public int getResponseCode() { + return responseCode; + } + + public BytesReference getResponseBodyContents() { + return responseBody.bytes(); + } + + @Override + public InetSocketAddress getLocalAddress() { + return null; + } + + @Override + public String getProtocol() { + return "HTTP/1.1"; + } + + @Override + public Object getAttribute(String name) { + return null; + } + + @Override + public void setAttribute(String name, Object value) { + fail("setAttribute not implemented"); + } + + @Override + public void setStreams(InputStream i, OutputStream o) { + fail("setStreams not implemented"); + } + + @Override + public HttpPrincipal getPrincipal() { + fail("getPrincipal not implemented"); + throw new UnsupportedOperationException("getPrincipal not implemented"); + } + } + +} diff --git a/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpFixture.java b/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpFixture.java index 39105e0a27dc9..ab4d54f4fc451 100644 --- 
a/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpFixture.java +++ b/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpFixture.java @@ -45,6 +45,7 @@ public class AzureHttpFixture extends ExternalResource { private final String clientId; private final String tenantId; private final Predicate authHeaderPredicate; + private final MockAzureBlobStore.LeaseExpiryPredicate leaseExpiryPredicate; private HttpServer server; private HttpServer metadataServer; @@ -116,7 +117,8 @@ public AzureHttpFixture( String container, @Nullable String rawTenantId, @Nullable String rawClientId, - Predicate authHeaderPredicate + Predicate authHeaderPredicate, + MockAzureBlobStore.LeaseExpiryPredicate leaseExpiryPredicate ) { final var tenantId = Strings.hasText(rawTenantId) ? rawTenantId : null; final var clientId = Strings.hasText(rawClientId) ? rawClientId : null; @@ -135,6 +137,7 @@ public AzureHttpFixture( this.tenantId = tenantId; this.clientId = clientId; this.authHeaderPredicate = authHeaderPredicate; + this.leaseExpiryPredicate = leaseExpiryPredicate; } private String scheme() { @@ -193,7 +196,10 @@ protected void before() { } case HTTP -> { server = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); - server.createContext("/" + account, new AzureHttpHandler(account, container, actualAuthHeaderPredicate)); + server.createContext( + "/" + account, + new AzureHttpHandler(account, container, actualAuthHeaderPredicate, leaseExpiryPredicate) + ); server.start(); oauthTokenServiceServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); @@ -222,7 +228,10 @@ protected void before() { final var httpsServer = HttpsServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); this.server = httpsServer; httpsServer.setHttpsConfigurator(new HttpsConfigurator(sslContext)); - httpsServer.createContext("/" + account, new AzureHttpHandler(account, container, actualAuthHeaderPredicate)); + httpsServer.createContext( + "/" + account, + new AzureHttpHandler(account, container, actualAuthHeaderPredicate, leaseExpiryPredicate) + ); httpsServer.start(); } { diff --git a/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java b/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java index 92ce04b6bea5b..cb7c700376a1a 100644 --- a/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java +++ b/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java @@ -15,7 +15,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.regex.Regex; @@ -23,11 +22,11 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.RestUtils; +import org.elasticsearch.test.fixture.HttpHeaderParser; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import java.io.BufferedReader; -import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; @@ -43,11 +42,9 @@ import java.util.Objects; import java.util.Set; import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; import java.util.function.Predicate; -import 
java.util.regex.Matcher; -import java.util.regex.Pattern; +import static fixture.azure.MockAzureBlobStore.failTestWithAssertionError; import static org.elasticsearch.repositories.azure.AzureFixtureHelper.assertValidBlockId; /** @@ -56,17 +53,28 @@ @SuppressForbidden(reason = "Uses a HttpServer to emulate an Azure endpoint") public class AzureHttpHandler implements HttpHandler { private static final Logger logger = LogManager.getLogger(AzureHttpHandler.class); + static final String X_MS_LEASE_ID = "x-ms-lease-id"; + static final String X_MS_PROPOSED_LEASE_ID = "x-ms-proposed-lease-id"; + static final String X_MS_LEASE_DURATION = "x-ms-lease-duration"; + static final String X_MS_LEASE_BREAK_PERIOD = "x-ms-lease-break-period"; + static final String X_MS_BLOB_TYPE = "x-ms-blob-type"; + static final String X_MS_BLOB_CONTENT_LENGTH = "x-ms-blob-content-length"; - private final Map blobs; private final String account; private final String container; private final Predicate authHeaderPredicate; - - public AzureHttpHandler(final String account, final String container, @Nullable Predicate authHeaderPredicate) { + private final MockAzureBlobStore mockAzureBlobStore; + + public AzureHttpHandler( + final String account, + final String container, + @Nullable Predicate authHeaderPredicate, + MockAzureBlobStore.LeaseExpiryPredicate leaseExpiryPredicate + ) { this.account = Objects.requireNonNull(account); this.container = Objects.requireNonNull(container); this.authHeaderPredicate = authHeaderPredicate; - this.blobs = new ConcurrentHashMap<>(); + this.mockAzureBlobStore = new MockAzureBlobStore(leaseExpiryPredicate); } private static List getAuthHeader(HttpExchange exchange) { @@ -134,7 +142,7 @@ public void handle(final HttpExchange exchange) throws IOException { final String blockId = params.get("blockid"); assert assertValidBlockId(blockId); - blobs.put(blockId, Streams.readFully(exchange.getRequestBody())); + mockAzureBlobStore.putBlock(blobPath(exchange), blockId, Streams.readFully(exchange.getRequestBody()), leaseId(exchange)); exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1); } else if (Regex.simpleMatch("PUT /" + account + "/" + container + "/*comp=blocklist*", request)) { @@ -145,83 +153,121 @@ public void handle(final HttpExchange exchange) throws IOException { .map(line -> line.substring(0, line.indexOf(""))) .toList(); - final ByteArrayOutputStream blob = new ByteArrayOutputStream(); - for (String blockId : blockIds) { - BytesReference block = blobs.remove(blockId); - assert block != null; - block.writeTo(blob); - } - blobs.put(exchange.getRequestURI().getPath(), new BytesArray(blob.toByteArray())); + mockAzureBlobStore.putBlockList(blobPath(exchange), blockIds, leaseId(exchange)); exchange.getResponseHeaders().add("x-ms-request-server-encrypted", "false"); exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1); + } else if (Regex.simpleMatch("PUT /" + account + "/" + container + "*comp=lease*", request)) { + // Lease Blob (https://learn.microsoft.com/en-us/rest/api/storageservices/lease-blob) + final String leaseAction = requireHeader(exchange, "x-ms-lease-action"); + + switch (leaseAction) { + case "acquire" -> { + final int leaseDurationSeconds = requireIntegerHeader(exchange, X_MS_LEASE_DURATION); + final String proposedLeaseId = exchange.getRequestHeaders().getFirst(X_MS_PROPOSED_LEASE_ID); + final String newLeaseId = mockAzureBlobStore.acquireLease( + blobPath(exchange), + leaseDurationSeconds, + proposedLeaseId + ); + 
exchange.getResponseHeaders().set(X_MS_LEASE_ID, newLeaseId); + exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1); + } + case "release" -> { + final String leaseId = requireHeader(exchange, X_MS_LEASE_ID); + mockAzureBlobStore.releaseLease(blobPath(exchange), leaseId); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1); + } + case "break" -> { + mockAzureBlobStore.breakLease(blobPath(exchange), getOptionalIntegerHeader(exchange, X_MS_LEASE_BREAK_PERIOD)); + exchange.sendResponseHeaders(RestStatus.ACCEPTED.getStatus(), -1); + } + case "renew", "change" -> { + failTestWithAssertionError("Attempt was made to use not-implemented lease action: " + leaseAction); + throw new MockAzureBlobStore.AzureBlobStoreError( + RestStatus.NOT_IMPLEMENTED, + "NotImplemented", + "Attempted to use unsupported lease API: " + leaseAction + ); + } + default -> { + failTestWithAssertionError("Unrecognized lease action: " + leaseAction); + throw new MockAzureBlobStore.BadRequestException( + "InvalidHeaderValue", + "Invalid x-ms-lease-action header: " + leaseAction + ); + } + } } else if (Regex.simpleMatch("PUT /" + account + "/" + container + "/*", request)) { // PUT Blob (see https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob) + final String blobType = requireHeader(exchange, X_MS_BLOB_TYPE); final String ifNoneMatch = exchange.getRequestHeaders().getFirst("If-None-Match"); - if ("*".equals(ifNoneMatch)) { - if (blobs.putIfAbsent(exchange.getRequestURI().getPath(), Streams.readFully(exchange.getRequestBody())) != null) { - sendError(exchange, RestStatus.CONFLICT); - return; - } - } else { - blobs.put(exchange.getRequestURI().getPath(), Streams.readFully(exchange.getRequestBody())); - } + mockAzureBlobStore.putBlob( + blobPath(exchange), + Streams.readFully(exchange.getRequestBody()), + blobType, + ifNoneMatch, + leaseId(exchange) + ); exchange.getResponseHeaders().add("x-ms-request-server-encrypted", "false"); exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1); } else if (Regex.simpleMatch("HEAD /" + account + "/" + container + "/*", request)) { // Get Blob Properties (see https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties) - final BytesReference blob = blobs.get(exchange.getRequestURI().getPath()); - if (blob == null) { - sendError(exchange, RestStatus.NOT_FOUND); - return; - } - exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(blob.length())); - exchange.getResponseHeaders().add("Content-Length", String.valueOf(blob.length())); - exchange.getResponseHeaders().add("x-ms-blob-type", "BlockBlob"); + final MockAzureBlobStore.AzureBlockBlob blob = mockAzureBlobStore.getBlob(blobPath(exchange), leaseId(exchange)); + + final Headers responseHeaders = exchange.getResponseHeaders(); + final BytesReference blobContents = blob.getContents(); + responseHeaders.add(X_MS_BLOB_CONTENT_LENGTH, String.valueOf(blobContents.length())); + responseHeaders.add("Content-Length", String.valueOf(blobContents.length())); + responseHeaders.add(X_MS_BLOB_TYPE, blob.type()); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1); } else if (Regex.simpleMatch("GET /" + account + "/" + container + "/*", request)) { - // GET Object (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html) - final BytesReference blob = blobs.get(exchange.getRequestURI().getPath()); - if (blob == null) { - sendError(exchange, RestStatus.NOT_FOUND); - return; - } + // Get Blob 
(https://learn.microsoft.com/en-us/rest/api/storageservices/get-blob) + final MockAzureBlobStore.AzureBlockBlob blob = mockAzureBlobStore.getBlob(blobPath(exchange), leaseId(exchange)); + final BytesReference responseContent; + final RestStatus successStatus; // see Constants.HeaderConstants.STORAGE_RANGE_HEADER - final String range = exchange.getRequestHeaders().getFirst("x-ms-range"); - final Matcher matcher = Pattern.compile("^bytes=([0-9]+)-([0-9]+)$").matcher(range); - if (matcher.matches() == false) { - throw new AssertionError("Range header does not match expected format: " + range); - } + final String rangeHeader = exchange.getRequestHeaders().getFirst("x-ms-range"); + if (rangeHeader != null) { + final HttpHeaderParser.Range range = HttpHeaderParser.parseRangeHeader(rangeHeader); + if (range == null) { + throw new MockAzureBlobStore.BadRequestException( + "InvalidHeaderValue", + "Range header does not match expected format: " + rangeHeader + ); + } - final long start = Long.parseLong(matcher.group(1)); - final long end = Long.parseLong(matcher.group(2)); + final BytesReference blobContents = blob.getContents(); + if (blobContents.length() <= range.start()) { + exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); + exchange.sendResponseHeaders(RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus(), -1); + return; + } - if (blob.length() <= start) { - exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); - exchange.sendResponseHeaders(RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus(), -1); - return; + responseContent = blobContents.slice( + Math.toIntExact(range.start()), + Math.toIntExact(Math.min(range.end() - range.start() + 1, blobContents.length() - range.start())) + ); + successStatus = RestStatus.PARTIAL_CONTENT; + } else { + responseContent = blob.getContents(); + successStatus = RestStatus.OK; } - var responseBlob = blob.slice(Math.toIntExact(start), Math.toIntExact(Math.min(end - start + 1, blob.length() - start))); - exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); - exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(responseBlob.length())); - exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); + exchange.getResponseHeaders().add(X_MS_BLOB_CONTENT_LENGTH, String.valueOf(responseContent.length())); + exchange.getResponseHeaders().add(X_MS_BLOB_TYPE, blob.type()); exchange.getResponseHeaders().add("ETag", "\"blockblob\""); - exchange.sendResponseHeaders(RestStatus.OK.getStatus(), responseBlob.length()); - responseBlob.writeTo(exchange.getResponseBody()); + exchange.sendResponseHeaders(successStatus.getStatus(), responseContent.length() == 0 ? 
-1 : responseContent.length()); + responseContent.writeTo(exchange.getResponseBody()); } else if (Regex.simpleMatch("DELETE /" + account + "/" + container + "/*", request)) { // Delete Blob (https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob) - final boolean deleted = blobs.entrySet().removeIf(blob -> blob.getKey().startsWith(exchange.getRequestURI().getPath())); - if (deleted) { - exchange.sendResponseHeaders(RestStatus.ACCEPTED.getStatus(), -1); - } else { - exchange.sendResponseHeaders(RestStatus.NOT_FOUND.getStatus(), -1); - } + mockAzureBlobStore.deleteBlob(blobPath(exchange), leaseId(exchange)); + exchange.sendResponseHeaders(RestStatus.ACCEPTED.getStatus(), -1); } else if (Regex.simpleMatch("GET /" + account + "/" + container + "?*restype=container*comp=list*", request)) { // List Blobs (https://docs.microsoft.com/en-us/rest/api/storageservices/list-blobs) @@ -239,11 +285,12 @@ public void handle(final HttpExchange exchange) throws IOException { list.append("").append(delimiter).append(""); } list.append(""); - for (Map.Entry blob : blobs.entrySet()) { - if (prefix != null && blob.getKey().startsWith("/" + account + "/" + container + "/" + prefix) == false) { - continue; - } - String blobPath = blob.getKey().replace("/" + account + "/" + container + "/", ""); + final Map matchingBlobs = mockAzureBlobStore.listBlobs( + prefix, + leaseId(exchange) + ); + for (Map.Entry blob : matchingBlobs.entrySet()) { + final String blobPath = blob.getKey(); if (delimiter != null) { int fromIndex = (prefix != null ? prefix.length() : 0); int delimiterPosition = blobPath.indexOf(delimiter, fromIndex); @@ -259,7 +306,7 @@ public void handle(final HttpExchange exchange) throws IOException { %s BlockBlob - """, blobPath, blob.getValue().length())); + """, blobPath, blob.getValue().getContents().length())); } if (blobPrefixes.isEmpty() == false) { blobPrefixes.forEach(p -> list.append("").append(p).append("")); @@ -294,7 +341,8 @@ public void handle(final HttpExchange exchange) throws IOException { } // Process the deletion - if (blobs.remove("/" + account + toDelete) != null) { + try { + mockAzureBlobStore.deleteBlob(toDelete, leaseId(exchange)); final String acceptedPart = Strings.format(""" --%s Content-Type: application/http @@ -307,32 +355,43 @@ public void handle(final HttpExchange exchange) throws IOException { """, responseBoundary, contentId, requestId).replaceAll("\n", "\r\n"); response.append(acceptedPart); - } else { - final String notFoundBody = Strings.format( + } catch (MockAzureBlobStore.AzureBlobStoreError e) { + final String errorResponseBody = Strings.format( """ - BlobNotFoundThe specified blob does not exist. + %s%s RequestId:%s Time:%s""", + e.getErrorCode(), + e.getMessage(), requestId, DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now(ZoneId.of("UTC"))) ); - final String notFoundPart = Strings.format(""" - --%s - Content-Type: application/http - Content-ID: %s - - HTTP/1.1 404 The specified blob does not exist. 
- x-ms-error-code: BlobNotFound - x-ms-request-id: %s - x-ms-version: 2018-11-09 - Content-Length: %d - Content-Type: application/xml - - %s - """, responseBoundary, contentId, requestId, notFoundBody.length(), notFoundBody) - .replaceAll("\n", "\r\n"); - response.append(notFoundPart); + final String errorResponsePart = Strings.format( + """ + --%s + Content-Type: application/http + Content-ID: %s + + HTTP/1.1 %s %s + x-ms-error-code: %s + x-ms-request-id: %s + x-ms-version: 2018-11-09 + Content-Length: %d + Content-Type: application/xml + + %s + """, + responseBoundary, + contentId, + e.getRestStatus().getStatus(), + e.getMessage(), + e.getErrorCode(), + requestId, + errorResponseBody.length(), + errorResponseBody + ).replaceAll("\n", "\r\n"); + response.append(errorResponsePart); } // Clear the state @@ -350,11 +409,18 @@ public void handle(final HttpExchange exchange) throws IOException { } contentId = line.split("\\s")[1]; } else if (Regex.simpleMatch("DELETE /" + container + "/*", line)) { - String blobName = RestUtils.decodeComponent(line.split("(\\s|\\?)")[1]); + final String path = RestUtils.decodeComponent(line.split("(\\s|\\?)")[1]); + if (toDelete != null) { + throw new IllegalStateException("Got multiple deletes in a single request?"); + } + toDelete = stripPrefix("/" + container + "/", path); + } else if (Regex.simpleMatch("DELETE /" + account + "/" + container + "/*", line)) { + // possible alternative DELETE url, depending on which method is used in the batch client + String path = RestUtils.decodeComponent(line.split("(\\s|\\?)")[1]); if (toDelete != null) { throw new IllegalStateException("Got multiple deletes in a single request?"); } - toDelete = blobName; + toDelete = stripPrefix("/" + account + "/" + container + "/", path); } } response.append("--").append(responseBoundary).append("--\r\n0\r\n"); @@ -364,20 +430,90 @@ public void handle(final HttpExchange exchange) throws IOException { logger.debug("--> Sending response:\n{}", response); exchange.getResponseBody().write(response.toString().getBytes(StandardCharsets.UTF_8)); } - } else { - logger.warn("--> Unrecognised request received: {}", request); - sendError(exchange, RestStatus.BAD_REQUEST); - } + } else if (Regex.simpleMatch("PUT /*/*/*master.dat", request) + && Regex.simpleMatch("PUT /" + account + "/" + container + "*", request) == false) { + // An attempt to put master.dat to a different container. This is probably + // org.elasticsearch.repositories.blobstore.BlobStoreRepository#startVerification + throw new MockAzureBlobStore.AzureBlobStoreError( + RestStatus.NOT_FOUND, + "ContainerNotFound", + "The specified container does not exist." + ); + } else if (Regex.simpleMatch("GET /*/*restype=container*comp=list*", request) + && Regex.simpleMatch("GET /" + account + "/" + container + "*", request) == false) { + // An attempt to list the contents of a different container. This is probably + // org.elasticsearch.repositories.blobstore.BlobStoreRepository#startVerification for a read-only + // repository + throw new MockAzureBlobStore.AzureBlobStoreError( + RestStatus.NOT_FOUND, + "ContainerNotFound", + "The specified container does not exist." 
+ ); + } else { + final String message = "You sent a request that is not supported by AzureHttpHandler: " + request; + failTestWithAssertionError(message); + throw new MockAzureBlobStore.BadRequestException("UnrecognisedRequest", message); + } + } catch (MockAzureBlobStore.AzureBlobStoreError e) { + sendError(exchange, e); + } catch (Exception e) { + failTestWithAssertionError("Uncaught exception", e); + sendError(exchange, RestStatus.INTERNAL_SERVER_ERROR, "InternalError", e.getMessage()); } finally { exchange.close(); } } + private String requireHeader(HttpExchange exchange, String headerName) { + final String headerValue = exchange.getRequestHeaders().getFirst(headerName); + if (headerValue == null) { + throw new MockAzureBlobStore.BadRequestException("MissingRequiredHeader", "Missing " + headerName + " header"); + } + return headerValue; + } + + private int requireIntegerHeader(HttpExchange exchange, String headerName) { + final String headerValue = requireHeader(exchange, headerName); + try { + return Integer.parseInt(headerValue); + } catch (NumberFormatException e) { + throw new MockAzureBlobStore.BadRequestException("InvalidHeaderValue", "Invalid " + headerName + " header"); + } + } + + @Nullable + private Integer getOptionalIntegerHeader(HttpExchange exchange, String headerName) { + final String headerValue = exchange.getRequestHeaders().getFirst(headerName); + try { + return headerValue == null ? null : Integer.parseInt(headerValue); + } catch (NumberFormatException e) { + throw new MockAzureBlobStore.BadRequestException("InvalidHeaderValue", "Invalid " + headerName + " header"); + } + } + + @Nullable + private String leaseId(HttpExchange exchange) { + return exchange.getRequestHeaders().getFirst(X_MS_LEASE_ID); + } + + private String blobPath(HttpExchange exchange) { + return stripPrefix("/" + account + "/" + container + "/", exchange.getRequestURI().getPath()); + } + public Map blobs() { - return blobs; + return mockAzureBlobStore.blobs(); + } + + public static void sendError(HttpExchange exchange, MockAzureBlobStore.AzureBlobStoreError error) throws IOException { + sendError(exchange, error.getRestStatus(), error.getErrorCode(), error.getMessage()); } public static void sendError(final HttpExchange exchange, final RestStatus status) throws IOException { + final String errorCode = toAzureErrorCode(status); + sendError(exchange, status, errorCode, status.toString()); + } + + public static void sendError(HttpExchange exchange, RestStatus restStatus, String errorCode, String errorMessage) throws IOException { final Headers headers = exchange.getResponseHeaders(); headers.add("Content-Type", "application/xml"); @@ -388,20 +524,19 @@ public static void sendError(final HttpExchange exchange, final RestStatus statu headers.add("x-ms-request-id", requestId); } - final String errorCode = toAzureErrorCode(status); // see Constants.HeaderConstants.ERROR_CODE headers.add("x-ms-error-code", errorCode); if ("HEAD".equals(exchange.getRequestMethod())) { - exchange.sendResponseHeaders(status.getStatus(), -1L); + exchange.sendResponseHeaders(restStatus.getStatus(), -1L); } else { final byte[] response = (String.format(Locale.ROOT, """ %s %s - """, errorCode, status)).getBytes(StandardCharsets.UTF_8); - exchange.sendResponseHeaders(status.getStatus(), response.length); + """, errorCode, errorMessage)).getBytes(StandardCharsets.UTF_8); + exchange.sendResponseHeaders(restStatus.getStatus(), response.length); exchange.getResponseBody().write(response); } } @@ -420,4 +555,9 @@ private static String 
toAzureErrorCode(final RestStatus status) { ); }; } + + private String stripPrefix(String prefix, String toStrip) { + assert toStrip.startsWith(prefix); + return toStrip.substring(prefix.length()); + } } diff --git a/test/fixtures/azure-fixture/src/main/java/fixture/azure/MockAzureBlobStore.java b/test/fixtures/azure-fixture/src/main/java/fixture/azure/MockAzureBlobStore.java new file mode 100644 index 0000000000000..c694c27c1293b --- /dev/null +++ b/test/fixtures/azure-fixture/src/main/java/fixture/azure/MockAzureBlobStore.java @@ -0,0 +1,484 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package fixture.azure; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.CompositeBytesReference; +import org.elasticsearch.common.util.Maps; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.rest.RestStatus; + +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; + +public class MockAzureBlobStore { + + private static final Logger logger = LogManager.getLogger(MockAzureBlobStore.class); + private static final String BLOCK_BLOB_TYPE = "BlockBlob"; + private static final String PAGE_BLOB_TYPE = "PageBlob"; + private static final String APPEND_BLOB_TYPE = "AppendBlob"; + + private final LeaseExpiryPredicate leaseExpiryPredicate; + private final Map blobs; + + /** + * Provide the means of triggering lease expiration + * + * @param leaseExpiryPredicate A Predicate that takes an active lease ID and returns true when it should be expired, or null to never fail leases + */ + public MockAzureBlobStore(LeaseExpiryPredicate leaseExpiryPredicate) { + this.blobs = new ConcurrentHashMap<>(); + this.leaseExpiryPredicate = Objects.requireNonNull(leaseExpiryPredicate); + } + + public void putBlock(String path, String blockId, BytesReference content, @Nullable String leaseId) { + blobs.compute(path, (p, existing) -> { + if (existing != null) { + existing.putBlock(blockId, content, leaseId); + return existing; + } else { + final AzureBlockBlob azureBlockBlob = new AzureBlockBlob(); + azureBlockBlob.putBlock(blockId, content, leaseId); + return azureBlockBlob; + } + }); + } + + public void putBlockList(String path, List blockIds, @Nullable String leaseId) { + final AzureBlockBlob blob = getExistingBlob(path); + blob.putBlockList(blockIds, leaseId); + } + + public void putBlob(String path, BytesReference contents, String blobType, @Nullable String ifNoneMatch, @Nullable String leaseId) { + blobs.compute(path, (p, existingValue) -> { + if (existingValue != null) { + existingValue.setContents(contents, leaseId, ifNoneMatch); + return existingValue; + } else { + validateBlobType(blobType); + final AzureBlockBlob newBlob = new AzureBlockBlob(); + newBlob.setContents(contents, leaseId); + return newBlob; + } + }); + } + + private void validateBlobType(String blobType) { + if 
(BLOCK_BLOB_TYPE.equals(blobType)) { + return; + } + final String errorMessage; + if (PAGE_BLOB_TYPE.equals(blobType) || APPEND_BLOB_TYPE.equals(blobType)) { + errorMessage = "Only BlockBlob is supported. This is a limitation of the MockAzureBlobStore"; + } else { + errorMessage = "Invalid blobType: " + blobType; + } + // Fail the test and respond with an error + failTestWithAssertionError(errorMessage); + throw new MockAzureBlobStore.BadRequestException("InvalidHeaderValue", errorMessage); + } + + public AzureBlockBlob getBlob(String path, @Nullable String leaseId) { + final AzureBlockBlob blob = getExistingBlob(path); + // This is the public implementation of "get blob" which will 404 for uncommitted block blobs + if (blob.isCommitted() == false) { + throw new BlobNotFoundException(); + } + blob.checkLeaseForRead(leaseId); + return blob; + } + + public void deleteBlob(String path, @Nullable String leaseId) { + final AzureBlockBlob blob = getExistingBlob(path); + blob.checkLeaseForWrite(leaseId); + blobs.remove(path); + } + + public Map<String, AzureBlockBlob> listBlobs(String prefix, @Nullable String leaseId) { + return blobs.entrySet() + .stream() + .filter(e -> prefix == null || e.getKey().startsWith(prefix)) + .filter(e -> e.getValue().isCommitted()) + .peek(e -> e.getValue().checkLeaseForRead(leaseId)) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } + + public String acquireLease(String path, int leaseTimeSeconds, @Nullable String proposedLeaseId) { + final AzureBlockBlob blob = getExistingBlob(path); + return blob.acquireLease(proposedLeaseId, leaseTimeSeconds); + } + + public void releaseLease(String path, @Nullable String leaseId) { + final AzureBlockBlob blob = getExistingBlob(path); + blob.releaseLease(leaseId); + } + + public void breakLease(String path, @Nullable Integer leaseBreakPeriod) { + final AzureBlockBlob blob = getExistingBlob(path); + blob.breakLease(leaseBreakPeriod); + } + + public Map<String, BytesReference> blobs() { + return Maps.transformValues(blobs, AzureBlockBlob::getContents); + } + + private AzureBlockBlob getExistingBlob(String path) { + final AzureBlockBlob blob = blobs.get(path); + if (blob == null) { + throw new BlobNotFoundException(); + } + return blob; + } + + static void failTestWithAssertionError(String message) { + ExceptionsHelper.maybeDieOnAnotherThread(new AssertionError(message)); + } + + static void failTestWithAssertionError(String message, Throwable throwable) { + ExceptionsHelper.maybeDieOnAnotherThread(new AssertionError(message, throwable)); + } + + public class AzureBlockBlob { + private final Object writeLock = new Object(); + private final Lease lease = new Lease(); + private final Map<String, BytesReference> blocks; + private volatile BytesReference contents; + + private AzureBlockBlob() { + this.blocks = new ConcurrentHashMap<>(); + } + + public void putBlock(String blockId, BytesReference content, @Nullable String leaseId) { + synchronized (writeLock) { + lease.checkLeaseForWrite(leaseId); + this.blocks.put(blockId, content); + } + } + + public void putBlockList(List<String> blockIds, @Nullable String leaseId) throws BadRequestException { + synchronized (writeLock) { + lease.checkLeaseForWrite(leaseId); + final List<String> unresolvedBlocks = blockIds.stream().filter(bId -> blocks.containsKey(bId) == false).toList(); + if (unresolvedBlocks.isEmpty() == false) { + logger.warn("Block list contained non-existent block IDs: {}", unresolvedBlocks); + throw new BadRequestException("InvalidBlockList", "The specified blocklist is invalid."); + } + final
BytesReference[] resolvedContents = blockIds.stream().map(blocks::get).toList().toArray(new BytesReference[0]); + contents = CompositeBytesReference.of(resolvedContents); + } + } + + private boolean matches(String ifNoneMatchHeaderValue) { + if (ifNoneMatchHeaderValue == null) { + return false; + } + // We only support * + if ("*".equals(ifNoneMatchHeaderValue)) { + return true; + } + // Fail the test, trigger an internal server error + failTestWithAssertionError("We've only implemented 'If-None-Match: *' in the MockAzureBlobStore"); + throw new AzureBlobStoreError( + RestStatus.INTERNAL_SERVER_ERROR, + "UnsupportedHeader", + "The test fixture only supports * for If-None-Match" + ); + } + + public synchronized void setContents(BytesReference contents, @Nullable String leaseId) { + synchronized (writeLock) { + lease.checkLeaseForWrite(leaseId); + this.contents = contents; + this.blocks.clear(); + } + } + + public void setContents(BytesReference contents, @Nullable String leaseId, @Nullable String ifNoneMatchHeaderValue) { + synchronized (writeLock) { + if (matches(ifNoneMatchHeaderValue)) { + throw new PreconditionFailedException( + "TargetConditionNotMet", + "The target condition specified using HTTP conditional header(s) is not met." + ); + } + setContents(contents, leaseId); + } + } + + /** + * Get the committed contents of the blob + * + * @return The last committed contents of the blob, or null if the blob is uncommitted + */ + @Nullable + public BytesReference getContents() { + return contents; + } + + public String type() { + return BLOCK_BLOB_TYPE; + } + + public boolean isCommitted() { + return contents != null; + } + + @Override + public String toString() { + return "MockAzureBlockBlob{" + "blocks=" + blocks + ", contents=" + contents + '}'; + } + + public String acquireLease(@Nullable String proposedLeaseId, int leaseTimeSeconds) { + synchronized (writeLock) { + return lease.acquire(proposedLeaseId, leaseTimeSeconds); + } + } + + public void releaseLease(String leaseId) { + synchronized (writeLock) { + lease.release(leaseId); + } + } + + public void breakLease(@Nullable Integer leaseBreakPeriod) { + synchronized (writeLock) { + lease.breakLease(leaseBreakPeriod); + } + } + + public void checkLeaseForRead(@Nullable String leaseId) { + lease.checkLeaseForRead(leaseId); + } + + public void checkLeaseForWrite(@Nullable String leaseId) { + lease.checkLeaseForWrite(leaseId); + } + } + + /** + * @see acquire/release rules + * @see read/write rules + */ + public class Lease { + + /** + * Minimal set of states, we don't support breaking/broken + */ + enum State { + Available, + Leased, + Expired, + Broken + } + + private String leaseId; + private State state = State.Available; + private int leaseDurationSeconds; + + public synchronized String acquire(@Nullable String proposedLeaseId, int leaseDurationSeconds) { + maybeExpire(proposedLeaseId); + switch (state) { + case Available, Expired, Broken -> { + final State prevState = state; + state = State.Leased; + leaseId = proposedLeaseId != null ? 
proposedLeaseId : UUID.randomUUID().toString(); + validateLeaseDuration(leaseDurationSeconds); + this.leaseDurationSeconds = leaseDurationSeconds; + logger.debug("Granting lease, prior state={}, leaseId={}", prevState, leaseId); + } + case Leased -> { + if (leaseId.equals(proposedLeaseId) == false) { + logger.debug("Mismatch on acquire - proposed leaseId: {}, active leaseId: {}", proposedLeaseId, leaseId); + throw new ConflictException( + "LeaseIdMismatchWithLeaseOperation", + "The lease ID specified did not match the lease ID for the blob/container." + ); + } + validateLeaseDuration(leaseDurationSeconds); + } + } + return leaseId; + } + + public synchronized void release(String requestLeaseId) { + switch (state) { + case Available -> throw new ConflictException( + "LeaseNotPresentWithLeaseOperation", + "There is currently no lease on the blob/container." + ); + case Leased, Expired, Broken -> { + if (leaseId.equals(requestLeaseId) == false) { + logger.debug("Mismatch on release - submitted leaseId: {}, active leaseId: {}", requestLeaseId, this.leaseId); + throw new ConflictException( + "LeaseIdMismatchWithLeaseOperation", + "The lease ID specified did not match the lease ID for the blob/container." + ); + } + state = State.Available; + this.leaseId = null; + } + } + } + + public synchronized void breakLease(Integer leaseBreakPeriod) { + // We haven't implemented the "Breaking" state so we don't support 'breaks' for non-infinite leases unless break-period is 0 + if (leaseDurationSeconds != -1 && (leaseBreakPeriod == null || leaseBreakPeriod != 0)) { + failTestWithAssertionError( + "MockAzureBlobStore only supports breaking non-infinite leases with 'x-ms-lease-break-period: 0'" + ); + } + switch (state) { + case Available -> throw new ConflictException( + "LeaseNotPresentWithLeaseOperation", + "There is currently no lease on the blob/container." + ); + case Leased, Expired, Broken -> state = State.Broken; + } + } + + public synchronized void checkLeaseForWrite(@Nullable String requestLeaseId) { + maybeExpire(requestLeaseId); + switch (state) { + case Available, Expired, Broken -> { + if (requestLeaseId != null) { + throw new PreconditionFailedException( + "LeaseLost", + "A lease ID was specified, but the lease for the blob/container has expired." + ); + } + } + case Leased -> { + if (requestLeaseId == null) { + throw new PreconditionFailedException( + "LeaseIdMissing", + "There is currently a lease on the blob/container and no lease ID was specified in the request." + ); + } + if (leaseId.equals(requestLeaseId) == false) { + throw new ConflictException( + "LeaseIdMismatchWithBlobOperation", + "The lease ID specified did not match the lease ID for the blob." + ); + } + } + } + } + + public synchronized void checkLeaseForRead(@Nullable String requestLeaseId) { + maybeExpire(requestLeaseId); + switch (state) { + case Available, Expired, Broken -> { + if (requestLeaseId != null) { + throw new PreconditionFailedException( + "LeaseLost", + "A lease ID was specified, but the lease for the blob/container has expired." + ); + } + } + case Leased -> { + if (requestLeaseId != null && requestLeaseId.equals(leaseId) == false) { + throw new ConflictException( + "LeaseIdMismatchWithBlobOperation", + "The lease ID specified did not match the lease ID for the blob."
+ ); + } + } + } + } + + /** + * If there's an active lease, ask the predicate if we should expire the existing lease + * + * @param requestLeaseId The lease ID from the request + */ + private void maybeExpire(String requestLeaseId) { + if (state == State.Leased && leaseExpiryPredicate.shouldExpireLease(leaseId, requestLeaseId)) { + logger.debug("Expiring lease, id={}", leaseId); + state = State.Expired; + } + } + + private void validateLeaseDuration(long leaseTimeSeconds) { + if (leaseTimeSeconds != -1 && (leaseTimeSeconds < 15 || leaseTimeSeconds > 60)) { + throw new BadRequestException( + "InvalidHeaderValue", + AzureHttpHandler.X_MS_LEASE_DURATION + " must be between 15 and 60 seconds (was " + leaseTimeSeconds + ")" + ); + } + } + } + + public static class AzureBlobStoreError extends RuntimeException { + private final RestStatus restStatus; + private final String errorCode; + + public AzureBlobStoreError(RestStatus restStatus, String errorCode, String message) { + super(message); + this.restStatus = restStatus; + this.errorCode = errorCode; + } + + public RestStatus getRestStatus() { + return restStatus; + } + + public String getErrorCode() { + return errorCode; + } + } + + public static class BlobNotFoundException extends AzureBlobStoreError { + public BlobNotFoundException() { + super(RestStatus.NOT_FOUND, "BlobNotFound", "The specified blob does not exist."); + } + } + + public static class BadRequestException extends AzureBlobStoreError { + public BadRequestException(String errorCode, String message) { + super(RestStatus.BAD_REQUEST, errorCode, message); + } + } + + public static class ConflictException extends AzureBlobStoreError { + public ConflictException(String errorCode, String message) { + super(RestStatus.CONFLICT, errorCode, message); + } + } + + public static class PreconditionFailedException extends AzureBlobStoreError { + public PreconditionFailedException(String errorCode, String message) { + super(RestStatus.PRECONDITION_FAILED, errorCode, message); + } + } + + public interface LeaseExpiryPredicate { + + LeaseExpiryPredicate NEVER_EXPIRE = (activeLeaseId, requestLeaseId) -> false; + + /** + * Should the lease be expired? + * + * @param activeLeaseId The current active lease ID + * @param requestLeaseId The request lease ID (if any) + * @return true to expire the lease, false otherwise + */ + boolean shouldExpireLease(String activeLeaseId, @Nullable String requestLeaseId); + } +}
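The lease logic above mirrors the Azure rules: a lease is acquired with an optional proposed ID, reads and writes are validated against the active lease, and expiry is delegated to the pluggable LeaseExpiryPredicate. The following is a minimal usage sketch, not part of this change; it assumes the fixture is constructed with the LeaseExpiryPredicate that maybeExpire() consults, and the blob path is made up:

// Assumed constructor: the fixture stores the predicate referenced by maybeExpire().
MockAzureBlobStore store = new MockAzureBlobStore(MockAzureBlobStore.LeaseExpiryPredicate.NEVER_EXPIRE);
// The blob must already exist (e.g. via a prior putBlock/putBlockList) before a lease can be taken on it.
String leaseId = store.acquireLease("container/blob", -1, null); // -1 = infinite; null proposed ID => server-generated UUID
// While Leased: a null lease ID fails with "LeaseIdMissing", a different ID with "LeaseIdMismatchWithBlobOperation",
// and the holder's ID is accepted, so this delete succeeds.
store.deleteBlob("container/blob", leaseId);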
diff --git a/test/fixtures/build.gradle b/test/fixtures/build.gradle index 02d68517903a3..e69de29bb2d1d 100644 --- a/test/fixtures/build.gradle +++ b/test/fixtures/build.gradle @@ -1,9 +0,0 @@ - -subprojects { - // fixtures don't have tests, these are external projects used by the build - pluginManager.withPlugin('java') { - tasks.named('test').configure { - enabled = false - } - } -} diff --git a/test/fixtures/ec2-imds-fixture/build.gradle b/test/fixtures/ec2-imds-fixture/build.gradle new file mode 100644 index 0000000000000..7ad194acbb8fd --- /dev/null +++ b/test/fixtures/ec2-imds-fixture/build.gradle @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ +apply plugin: 'elasticsearch.java' + +description = 'Fixture for emulating the Instance Metadata Service (IMDS) running in AWS EC2' + +dependencies { + api project(':server') + api("junit:junit:${versions.junit}") { + transitive = false + } + api project(':test:framework') +} diff --git a/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpFixture.java b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpFixture.java new file mode 100644 index 0000000000000..c63c65a750d7c --- /dev/null +++ b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpFixture.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ +package fixture.aws.imds; + +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; + +import org.junit.rules.ExternalResource; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.Objects; +import java.util.Set; +import java.util.function.BiConsumer; + +public class Ec2ImdsHttpFixture extends ExternalResource { + + private HttpServer server; + + private final Ec2ImdsVersion ec2ImdsVersion; + private final BiConsumer<String, String> newCredentialsConsumer; + private final Set<String> alternativeCredentialsEndpoints; + + public Ec2ImdsHttpFixture( + Ec2ImdsVersion ec2ImdsVersion, + BiConsumer<String, String> newCredentialsConsumer, + Set<String> alternativeCredentialsEndpoints + ) { + this.ec2ImdsVersion = Objects.requireNonNull(ec2ImdsVersion); + this.newCredentialsConsumer = Objects.requireNonNull(newCredentialsConsumer); + this.alternativeCredentialsEndpoints = Objects.requireNonNull(alternativeCredentialsEndpoints); + } + + protected HttpHandler createHandler() { + return new Ec2ImdsHttpHandler(ec2ImdsVersion, newCredentialsConsumer, alternativeCredentialsEndpoints); + } + + public String getAddress() { + return "http://" + server.getAddress().getHostString() + ":" + server.getAddress().getPort(); + } + + public void stop(int delay) { + server.stop(delay); + } + + protected void before() throws Throwable { + server = HttpServer.create(resolveAddress(), 0); + server.createContext("/", Objects.requireNonNull(createHandler())); + server.start(); + } + + @Override + protected void after() { + stop(0); + } + + private static InetSocketAddress resolveAddress() { + try { + return new InetSocketAddress(InetAddress.getByName("localhost"), 0); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + } +}
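Since Ec2ImdsHttpFixture extends ExternalResource, it can be installed as a JUnit rule. A rough sketch of how a consumer might stand it up (hypothetical test code; the map-based consumer mirrors what the handler tests below do):

// Record every access-key/session-token pair the emulated IMDS hands out.
private static final Map<String, String> mintedCredentials = new ConcurrentHashMap<>();

@ClassRule
public static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture(
    Ec2ImdsVersion.V2,
    mintedCredentials::put, // newCredentialsConsumer
    Set.of()                // no alternative credentials endpoints
);
// The SDK under test is then pointed at ec2ImdsHttpFixture.getAddress(), typically via the
// com.amazonaws.sdk.ec2MetadataServiceEndpointOverride system property for the v1 AWS SDK.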
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ +package fixture.aws.imds; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.rest.RestStatus; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.time.Clock; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.Collection; +import java.util.Objects; +import java.util.Set; +import java.util.function.BiConsumer; + +import static org.elasticsearch.test.ESTestCase.randomIdentifier; +import static org.elasticsearch.test.ESTestCase.randomSecretKey; + +/** + * Minimal HTTP handler that emulates the EC2 IMDS server + */ +@SuppressForbidden(reason = "this test uses a HttpServer to emulate the EC2 IMDS endpoint") +public class Ec2ImdsHttpHandler implements HttpHandler { + + private static final String IMDS_SECURITY_CREDENTIALS_PATH = "/latest/meta-data/iam/security-credentials/"; + + private final Ec2ImdsVersion ec2ImdsVersion; + private final Set validImdsTokens = ConcurrentCollections.newConcurrentSet(); + + private final BiConsumer newCredentialsConsumer; + private final Set validCredentialsEndpoints = ConcurrentCollections.newConcurrentSet(); + + public Ec2ImdsHttpHandler( + Ec2ImdsVersion ec2ImdsVersion, + BiConsumer newCredentialsConsumer, + Collection alternativeCredentialsEndpoints + ) { + this.ec2ImdsVersion = Objects.requireNonNull(ec2ImdsVersion); + this.newCredentialsConsumer = Objects.requireNonNull(newCredentialsConsumer); + this.validCredentialsEndpoints.addAll(alternativeCredentialsEndpoints); + } + + @Override + public void handle(final HttpExchange exchange) throws IOException { + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + + try (exchange) { + final var path = exchange.getRequestURI().getPath(); + final var requestMethod = exchange.getRequestMethod(); + + if ("PUT".equals(requestMethod) && "/latest/api/token".equals(path)) { + switch (ec2ImdsVersion) { + case V1 -> exchange.sendResponseHeaders(RestStatus.METHOD_NOT_ALLOWED.getStatus(), -1); + case V2 -> { + final var token = randomSecretKey(); + validImdsTokens.add(token); + final var responseBody = token.getBytes(StandardCharsets.UTF_8); + exchange.getResponseHeaders().add("Content-Type", "text/plain"); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), responseBody.length); + exchange.getResponseBody().write(responseBody); + } + } + return; + } + + if (ec2ImdsVersion == Ec2ImdsVersion.V2) { + final var token = exchange.getRequestHeaders().getFirst("X-aws-ec2-metadata-token"); + if (token == null) { + exchange.sendResponseHeaders(RestStatus.UNAUTHORIZED.getStatus(), -1); + return; + } + if (validImdsTokens.contains(token) == false) { + exchange.sendResponseHeaders(RestStatus.FORBIDDEN.getStatus(), -1); + return; + } + } + + if ("GET".equals(requestMethod)) { + if (path.equals(IMDS_SECURITY_CREDENTIALS_PATH)) { + final var profileName = randomIdentifier(); + 
validCredentialsEndpoints.add(IMDS_SECURITY_CREDENTIALS_PATH + profileName); + final byte[] response = profileName.getBytes(StandardCharsets.UTF_8); + exchange.getResponseHeaders().add("Content-Type", "text/plain"); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); + exchange.getResponseBody().write(response); + return; + } else if (validCredentialsEndpoints.contains(path)) { + final String accessKey = randomIdentifier(); + final String sessionToken = randomIdentifier(); + newCredentialsConsumer.accept(accessKey, sessionToken); + final byte[] response = Strings.format( + """ + { + "AccessKeyId": "%s", + "Expiration": "%s", + "RoleArn": "%s", + "SecretAccessKey": "%s", + "Token": "%s" + }""", + accessKey, + ZonedDateTime.now(Clock.systemUTC()).plusDays(1L).format(DateTimeFormatter.ISO_DATE_TIME), + randomIdentifier(), + randomSecretKey(), + sessionToken + ).getBytes(StandardCharsets.UTF_8); + exchange.getResponseHeaders().add("Content-Type", "application/json"); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); + exchange.getResponseBody().write(response); + return; + } + } + + ExceptionsHelper.maybeDieOnAnotherThread(new AssertionError("not supported: " + requestMethod + " " + path)); + } + } +} diff --git a/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsVersion.java b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsVersion.java new file mode 100644 index 0000000000000..7ed028c374cc7 --- /dev/null +++ b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsVersion.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package fixture.aws.imds; + +/** + * Represents the IMDS protocol version simulated by the {@link Ec2ImdsHttpHandler}. + */ +public enum Ec2ImdsVersion { + /** + * Classic V1 behavior: plain {@code GET} requests, no tokens. + */ + V1, + + /** + * Newer V2 behavior: {@code GET} requests must include an {@code X-aws-ec2-metadata-token} header providing a token previously obtained + * by calling {@code PUT /latest/api/token}. + */ + V2 +}
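Seen from the client side, the V2 handshake that Ec2ImdsHttpHandler implements looks like the sketch below (illustrative only; fixtureAddress stands for Ec2ImdsHttpFixture#getAddress(), java.net.http imports and checked exceptions are omitted):

HttpClient client = HttpClient.newHttpClient();
// Step 1: mint a token. Under V1 this PUT is answered with 405 Method Not Allowed.
String token = client.send(
    HttpRequest.newBuilder(URI.create(fixtureAddress + "/latest/api/token")).PUT(HttpRequest.BodyPublishers.noBody()).build(),
    HttpResponse.BodyHandlers.ofString()
).body();
// Step 2: every subsequent GET must echo the token; a missing token yields 401, an unknown one 403.
String profileName = client.send(
    HttpRequest.newBuilder(URI.create(fixtureAddress + "/latest/meta-data/iam/security-credentials/"))
        .header("X-aws-ec2-metadata-token", token)
        .GET()
        .build(),
    HttpResponse.BodyHandlers.ofString()
).body();
// Step 3: GET /latest/meta-data/iam/security-credentials/<profileName> with the same header
// returns the JSON credentials document and registers the new key pair with newCredentialsConsumer.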
diff --git a/test/fixtures/ec2-imds-fixture/src/test/java/fixture/aws/imds/Ec2ImdsHttpHandlerTests.java b/test/fixtures/ec2-imds-fixture/src/test/java/fixture/aws/imds/Ec2ImdsHttpHandlerTests.java new file mode 100644 index 0000000000000..bb613395a0fba --- /dev/null +++ b/test/fixtures/ec2-imds-fixture/src/test/java/fixture/aws/imds/Ec2ImdsHttpHandlerTests.java @@ -0,0 +1,252 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package fixture.aws.imds; + +import com.sun.net.httpserver.Headers; +import com.sun.net.httpserver.HttpContext; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpPrincipal; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.InetSocketAddress; +import java.net.URI; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.hamcrest.Matchers.aMapWithSize; + +public class Ec2ImdsHttpHandlerTests extends ESTestCase { + + private static final String SECURITY_CREDENTIALS_URI = "/latest/meta-data/iam/security-credentials/"; + + public void testImdsV1() throws IOException { + final Map<String, String> generatedCredentials = new HashMap<>(); + + final var handler = new Ec2ImdsHttpHandler(Ec2ImdsVersion.V1, generatedCredentials::put, Set.of()); + + final var roleResponse = handleRequest(handler, "GET", SECURITY_CREDENTIALS_URI); + assertEquals(RestStatus.OK, roleResponse.status()); + final var profileName = roleResponse.body().utf8ToString(); + assertTrue(Strings.hasText(profileName)); + + final var credentialsResponse = handleRequest(handler, "GET", SECURITY_CREDENTIALS_URI + profileName); + assertEquals(RestStatus.OK, credentialsResponse.status()); + + assertThat(generatedCredentials, aMapWithSize(1)); + final var accessKey = generatedCredentials.keySet().iterator().next(); + final var sessionToken = generatedCredentials.values().iterator().next(); + + final var responseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), credentialsResponse.body().streamInput(), false); + assertEquals(Set.of("AccessKeyId", "Expiration", "RoleArn", "SecretAccessKey", "Token"), responseMap.keySet()); + assertEquals(accessKey, responseMap.get("AccessKeyId")); + assertEquals(sessionToken, responseMap.get("Token")); + } + + public void testImdsV2Disabled() { + assertEquals( + RestStatus.METHOD_NOT_ALLOWED, + handleRequest( + new Ec2ImdsHttpHandler(Ec2ImdsVersion.V1, (accessKey, sessionToken) -> fail(), Set.of()), + "PUT", + "/latest/api/token" + ).status() + ); + } + + public void testImdsV2() throws IOException { + final Map<String, String> generatedCredentials = new HashMap<>(); + + final var handler = new Ec2ImdsHttpHandler(Ec2ImdsVersion.V2, generatedCredentials::put, Set.of()); + + final var tokenResponse = handleRequest(handler, "PUT", "/latest/api/token"); + assertEquals(RestStatus.OK, tokenResponse.status()); + final var token = tokenResponse.body().utf8ToString(); + + final var roleResponse = checkImdsV2GetRequest(handler, SECURITY_CREDENTIALS_URI, token); + assertEquals(RestStatus.OK, roleResponse.status()); + final var profileName = roleResponse.body().utf8ToString(); + assertTrue(Strings.hasText(profileName)); + + final var credentialsResponse = checkImdsV2GetRequest(handler, SECURITY_CREDENTIALS_URI + profileName, token); + assertEquals(RestStatus.OK, credentialsResponse.status()); + + assertThat(generatedCredentials, aMapWithSize(1)); + final var accessKey = generatedCredentials.keySet().iterator().next(); + final var sessionToken =
generatedCredentials.values().iterator().next(); + + final var responseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), credentialsResponse.body().streamInput(), false); + assertEquals(Set.of("AccessKeyId", "Expiration", "RoleArn", "SecretAccessKey", "Token"), responseMap.keySet()); + assertEquals(accessKey, responseMap.get("AccessKeyId")); + assertEquals(sessionToken, responseMap.get("Token")); + } + + private record TestHttpResponse(RestStatus status, BytesReference body) {} + + private static TestHttpResponse checkImdsV2GetRequest(Ec2ImdsHttpHandler handler, String uri, String token) { + final var unauthorizedResponse = handleRequest(handler, "GET", uri, null); + assertEquals(RestStatus.UNAUTHORIZED, unauthorizedResponse.status()); + + final var forbiddenResponse = handleRequest(handler, "GET", uri, randomValueOtherThan(token, ESTestCase::randomSecretKey)); + assertEquals(RestStatus.FORBIDDEN, forbiddenResponse.status()); + + return handleRequest(handler, "GET", uri, token); + } + + private static TestHttpResponse handleRequest(Ec2ImdsHttpHandler handler, String method, String uri) { + return handleRequest(handler, method, uri, null); + } + + private static TestHttpResponse handleRequest(Ec2ImdsHttpHandler handler, String method, String uri, @Nullable String token) { + final Headers headers; + if (token == null) { + headers = TestHttpExchange.EMPTY_HEADERS; + } else { + headers = new Headers(); + headers.put("X-aws-ec2-metadata-token", List.of(token)); + } + + final var httpExchange = new TestHttpExchange(method, uri, BytesArray.EMPTY, headers); + try { + handler.handle(httpExchange); + } catch (IOException e) { + fail(e); + } + assertNotEquals(0, httpExchange.getResponseCode()); + return new TestHttpResponse(RestStatus.fromCode(httpExchange.getResponseCode()), httpExchange.getResponseBodyContents()); + } + + private static class TestHttpExchange extends HttpExchange { + + private static final Headers EMPTY_HEADERS = new Headers(); + + private final String method; + private final URI uri; + private final BytesReference requestBody; + private final Headers requestHeaders; + + private final Headers responseHeaders = new Headers(); + private final BytesStreamOutput responseBody = new BytesStreamOutput(); + private int responseCode; + + TestHttpExchange(String method, String uri, BytesReference requestBody, Headers requestHeaders) { + this.method = method; + this.uri = URI.create(uri); + this.requestBody = requestBody; + this.requestHeaders = requestHeaders; + } + + @Override + public Headers getRequestHeaders() { + return requestHeaders; + } + + @Override + public Headers getResponseHeaders() { + return responseHeaders; + } + + @Override + public URI getRequestURI() { + return uri; + } + + @Override + public String getRequestMethod() { + return method; + } + + @Override + public HttpContext getHttpContext() { + return null; + } + + @Override + public void close() {} + + @Override + public InputStream getRequestBody() { + try { + return requestBody.streamInput(); + } catch (IOException e) { + throw new AssertionError(e); + } + } + + @Override + public OutputStream getResponseBody() { + return responseBody; + } + + @Override + public void sendResponseHeaders(int rCode, long responseLength) { + this.responseCode = rCode; + } + + @Override + public InetSocketAddress getRemoteAddress() { + return null; + } + + @Override + public int getResponseCode() { + return responseCode; + } + + public BytesReference getResponseBodyContents() { + return responseBody.bytes(); + } + + @Override 
+ public InetSocketAddress getLocalAddress() { + return null; + } + + @Override + public String getProtocol() { + return "HTTP/1.1"; + } + + @Override + public Object getAttribute(String name) { + return null; + } + + @Override + public void setAttribute(String name, Object value) { + fail("setAttribute not implemented"); + } + + @Override + public void setStreams(InputStream i, OutputStream o) { + fail("setStreams not implemented"); + } + + @Override + public HttpPrincipal getPrincipal() { + fail("getPrincipal not implemented"); + throw new UnsupportedOperationException("getPrincipal not implemented"); + } + } + +} diff --git a/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java b/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java index 51e3185623360..f6b52a32a9a1d 100644 --- a/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java +++ b/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java @@ -24,6 +24,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.RestUtils; +import org.elasticsearch.test.fixture.HttpHeaderParser; import java.io.BufferedReader; import java.io.IOException; @@ -58,8 +59,6 @@ public class GoogleCloudStorageHttpHandler implements HttpHandler { private static final Logger logger = LogManager.getLogger(GoogleCloudStorageHttpHandler.class); - private static final Pattern RANGE_MATCHER = Pattern.compile("bytes=([0-9]*)-([0-9]*)"); - private final ConcurrentMap blobs; private final String bucket; @@ -131,19 +130,19 @@ public void handle(final HttpExchange exchange) throws IOException { // Download Object https://cloud.google.com/storage/docs/request-body BytesReference blob = blobs.get(exchange.getRequestURI().getPath().replace("/download/storage/v1/b/" + bucket + "/o/", "")); if (blob != null) { - final String range = exchange.getRequestHeaders().getFirst("Range"); + final String rangeHeader = exchange.getRequestHeaders().getFirst("Range"); final long offset; final long end; - if (range == null) { + if (rangeHeader == null) { offset = 0L; end = blob.length() - 1; } else { - Matcher matcher = RANGE_MATCHER.matcher(range); - if (matcher.find() == false) { - throw new AssertionError("Range bytes header does not match expected format: " + range); + final HttpHeaderParser.Range range = HttpHeaderParser.parseRangeHeader(rangeHeader); + if (range == null) { + throw new AssertionError("Range bytes header does not match expected format: " + rangeHeader); } - offset = Long.parseLong(matcher.group(1)); - end = Long.parseLong(matcher.group(2)); + offset = range.start(); + end = range.end(); } if (offset >= blob.length()) { diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 8911bdf286a0f..9dc0263f49aee 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -8,7 +8,7 @@ */ apply plugin: 'elasticsearch.java' -apply plugin: 'com.github.johnrengelman.shadow' +apply plugin: 'com.gradleup.shadow' import com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar diff --git a/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/minio/MinioTestContainer.java b/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/minio/MinioTestContainer.java index 285bbb91983cc..3ee18d71a5a79 100644 --- 
a/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/minio/MinioTestContainer.java +++ b/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/minio/MinioTestContainer.java @@ -18,17 +18,13 @@ public final class MinioTestContainer extends DockerEnvironmentAwareTestContaine public static final String DOCKER_BASE_IMAGE = "minio/minio:RELEASE.2021-03-01T04-20-55Z"; private final boolean enabled; - public MinioTestContainer() { - this(true); - } - - public MinioTestContainer(boolean enabled) { + public MinioTestContainer(boolean enabled, String accessKey, String secretKey, String bucketName) { super( new ImageFromDockerfile("es-minio-testfixture").withDockerfileFromBuilder( builder -> builder.from(DOCKER_BASE_IMAGE) - .env("MINIO_ACCESS_KEY", "s3_test_access_key") - .env("MINIO_SECRET_KEY", "s3_test_secret_key") - .run("mkdir -p /minio/data/bucket") + .env("MINIO_ACCESS_KEY", accessKey) + .env("MINIO_SECRET_KEY", secretKey) + .run("mkdir -p /minio/data/" + bucketName) .cmd("server", "/minio/data") .build() ) diff --git a/test/fixtures/s3-fixture/build.gradle b/test/fixtures/s3-fixture/build.gradle index d628800497293..e4c35464608a8 100644 --- a/test/fixtures/s3-fixture/build.gradle +++ b/test/fixtures/s3-fixture/build.gradle @@ -15,5 +15,5 @@ dependencies { api("junit:junit:${versions.junit}") { transitive = false } - testImplementation project(':test:framework') + implementation project(':test:framework') } diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/DynamicS3Credentials.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/DynamicS3Credentials.java new file mode 100644 index 0000000000000..4e8f267ad3543 --- /dev/null +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/DynamicS3Credentials.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package fixture.s3; + +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; + +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +/** + * Allows dynamic creation of access-key/session-token credentials for accessing AWS services such as S3. Typically there's one service + * (e.g. IMDS or STS) which creates credentials dynamically and registers them here using {@link #addValidCredentials}, and then the + * {@link S3HttpFixture} uses {@link #isAuthorized} to validate that the credentials it receives correspond to some previously-generated + * credentials.
+ */ +public class DynamicS3Credentials { + private final Map<String, Set<String>> validCredentialsMap = ConcurrentCollections.newConcurrentMap(); + + public boolean isAuthorized(String authorizationHeader, String sessionTokenHeader) { + return authorizationHeader != null + && sessionTokenHeader != null + && validCredentialsMap.getOrDefault(sessionTokenHeader, Set.of()).stream().anyMatch(authorizationHeader::contains); + } + + public void addValidCredentials(String accessKey, String sessionToken) { + validCredentialsMap.computeIfAbsent( + Objects.requireNonNull(sessionToken, "sessionToken"), + t -> ConcurrentCollections.newConcurrentSet() + ).add(Objects.requireNonNull(accessKey, "accessKey")); + } +} diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java index 29123d6f2e7eb..ab70f043043cc 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java @@ -21,29 +21,27 @@ import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.Objects; +import java.util.function.BiPredicate; +import java.util.function.Supplier; public class S3HttpFixture extends ExternalResource { private HttpServer server; - private boolean enabled; + private final boolean enabled; private final String bucket; private final String basePath; - protected final String accessKey; - - public S3HttpFixture() { - this(true); - } + private final BiPredicate<String, String> authorizationPredicate; public S3HttpFixture(boolean enabled) { - this(enabled, "bucket", "base_path_integration_tests", "s3_test_access_key"); + this(enabled, "bucket", "base_path_integration_tests", fixedAccessKey("s3_test_access_key")); } - public S3HttpFixture(boolean enabled, String bucket, String basePath, String accessKey) { + public S3HttpFixture(boolean enabled, String bucket, String basePath, BiPredicate<String, String> authorizationPredicate) { this.enabled = enabled; this.bucket = bucket; this.basePath = basePath; - this.accessKey = accessKey; + this.authorizationPredicate = authorizationPredicate; } protected HttpHandler createHandler() { @@ -51,9 +49,11 @@ protected HttpHandler createHandler() { @Override public void handle(final HttpExchange exchange) throws IOException { try { - final String authorization = exchange.getRequestHeaders().getFirst("Authorization"); - if (authorization == null || authorization.contains(accessKey) == false) { - sendError(exchange, RestStatus.FORBIDDEN, "AccessDenied", "Bad access key"); + if (authorizationPredicate.test( + exchange.getRequestHeaders().getFirst("Authorization"), + exchange.getRequestHeaders().getFirst("x-amz-security-token") + ) == false) { + sendError(exchange, RestStatus.FORBIDDEN, "AccessDenied", "Access denied by " + authorizationPredicate); return; } super.handle(exchange); @@ -76,7 +76,7 @@ public void stop(int delay) { protected void before() throws Throwable { if (enabled) { - InetSocketAddress inetSocketAddress = resolveAddress("localhost", 0); + InetSocketAddress inetSocketAddress = resolveAddress(); this.server = HttpServer.create(inetSocketAddress, 0); HttpHandler handler = createHandler(); this.server.createContext("/", Objects.requireNonNull(handler)); @@ -91,11 +91,27 @@ protected void after() { } } - private static InetSocketAddress resolveAddress(String address, int port) { + private static InetSocketAddress resolveAddress() { try { - return new InetSocketAddress(InetAddress.getByName(address), port); + return new InetSocketAddress(InetAddress.getByName("localhost"), 0); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + } + + public static BiPredicate<String, String> fixedAccessKey(String accessKey) { + return mutableAccessKey(() -> accessKey); + } + + public static BiPredicate<String, String> mutableAccessKey(Supplier<String> accessKeySupplier) { + return (authorizationHeader, sessionTokenHeader) -> authorizationHeader != null + && authorizationHeader.contains(accessKeySupplier.get()); + } + + public static BiPredicate<String, String> fixedAccessKeyAndToken(String accessKey, String sessionToken) { + Objects.requireNonNull(sessionToken); + final var accessKeyPredicate = fixedAccessKey(accessKey); + return (authorizationHeader, sessionTokenHeader) -> accessKeyPredicate.test(authorizationHeader, sessionTokenHeader) + && sessionToken.equals(sessionTokenHeader); + } }
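DynamicS3Credentials and the predicate-based authorization replace the deleted S3HttpFixtureWithEC2/ECS/SessionToken subclasses that follow. A rough wiring sketch (variable names are illustrative):

DynamicS3Credentials dynamicCredentials = new DynamicS3Credentials();
// The IMDS fixture registers every access-key/session-token pair it mints...
Ec2ImdsHttpFixture imdsFixture = new Ec2ImdsHttpFixture(Ec2ImdsVersion.V2, dynamicCredentials::addValidCredentials, Set.of());
// ...and the S3 fixture authorizes exactly those pairs, matching the Authorization header against
// the access keys registered under the presented x-amz-security-token.
S3HttpFixture s3Fixture = new S3HttpFixture(true, "bucket", "base_path_integration_tests", dynamicCredentials::isAuthorized);
// Static credentials remain available through the factories: fixedAccessKey("s3_test_access_key"),
// mutableAccessKey(supplier), or fixedAccessKeyAndToken(accessKey, sessionToken).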
diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEC2.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEC2.java deleted file mode 100644 index d7048cbea6b8a..0000000000000 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEC2.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ -package fixture.s3; - -import com.sun.net.httpserver.HttpHandler; - -import org.elasticsearch.rest.RestStatus; - -import java.nio.charset.StandardCharsets; - -import java.time.ZonedDateTime; -import java.time.format.DateTimeFormatter; -import java.util.Locale; - -public class S3HttpFixtureWithEC2 extends S3HttpFixtureWithSessionToken { - - private static final String EC2_PATH = "/latest/meta-data/iam/security-credentials/"; - private static final String EC2_PROFILE = "ec2Profile"; - - public S3HttpFixtureWithEC2() { - this(true); - } - - public S3HttpFixtureWithEC2(boolean enabled) { - this(enabled, "ec2_bucket", "ec2_base_path", "ec2_access_key", "ec2_session_token"); - } - - public S3HttpFixtureWithEC2(boolean enabled, String bucket, String basePath, String accessKey, String sessionToken) { - super(enabled, bucket, basePath, accessKey, sessionToken); - } - - @Override - protected HttpHandler createHandler() { - final HttpHandler delegate = super.createHandler(); - - return exchange -> { - final String path = exchange.getRequestURI().getPath(); - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - if ("GET".equals(exchange.getRequestMethod()) && path.startsWith(EC2_PATH)) { - if (path.equals(EC2_PATH)) { - final byte[] response = EC2_PROFILE.getBytes(StandardCharsets.UTF_8); - exchange.getResponseHeaders().add("Content-Type", "text/plain"); - exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); - exchange.getResponseBody().write(response); - exchange.close(); - return; - - } else if (path.equals(EC2_PATH + EC2_PROFILE)) { - final byte[] response = buildCredentialResponse(accessKey, sessionToken).getBytes(StandardCharsets.UTF_8); - exchange.getResponseHeaders().add("Content-Type", "application/json"); - exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); - exchange.getResponseBody().write(response); -
exchange.close(); - return; - } - - final byte[] response = "unknown profile".getBytes(StandardCharsets.UTF_8); - exchange.getResponseHeaders().add("Content-Type", "text/plain"); - exchange.sendResponseHeaders(RestStatus.NOT_FOUND.getStatus(), response.length); - exchange.getResponseBody().write(response); - exchange.close(); - return; - - } - delegate.handle(exchange); - }; - } - - protected static String buildCredentialResponse(final String ec2AccessKey, final String ec2SessionToken) { - return String.format(Locale.ROOT, """ - { - "AccessKeyId": "%s", - "Expiration": "%s", - "RoleArn": "arn", - "SecretAccessKey": "secret_access_key", - "Token": "%s" - }""", ec2AccessKey, ZonedDateTime.now().plusDays(1L).format(DateTimeFormatter.ISO_DATE_TIME), ec2SessionToken); - } -} diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithECS.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithECS.java deleted file mode 100644 index d6266ea75dd3a..0000000000000 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithECS.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ -package fixture.s3; - -import com.sun.net.httpserver.HttpHandler; - -import org.elasticsearch.rest.RestStatus; - -import java.nio.charset.StandardCharsets; - -public class S3HttpFixtureWithECS extends S3HttpFixtureWithEC2 { - - public S3HttpFixtureWithECS() { - this(true); - } - - public S3HttpFixtureWithECS(boolean enabled) { - this(enabled, "ecs_bucket", "ecs_base_path", "ecs_access_key", "ecs_session_token"); - } - - public S3HttpFixtureWithECS(boolean enabled, String bucket, String basePath, String accessKey, String sessionToken) { - super(enabled, bucket, basePath, accessKey, sessionToken); - } - - @Override - protected HttpHandler createHandler() { - final HttpHandler delegate = super.createHandler(); - - return exchange -> { - // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html - if ("GET".equals(exchange.getRequestMethod()) && exchange.getRequestURI().getPath().equals("/ecs_credentials_endpoint")) { - final byte[] response = buildCredentialResponse(accessKey, sessionToken).getBytes(StandardCharsets.UTF_8); - exchange.getResponseHeaders().add("Content-Type", "application/json"); - exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); - exchange.getResponseBody().write(response); - exchange.close(); - return; - } - delegate.handle(exchange); - }; - } -} diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSessionToken.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSessionToken.java deleted file mode 100644 index 1a1cbba651e06..0000000000000 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSessionToken.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ -package fixture.s3; - -import com.sun.net.httpserver.HttpHandler; - -import org.elasticsearch.rest.RestStatus; - -import static fixture.s3.S3HttpHandler.sendError; - -public class S3HttpFixtureWithSessionToken extends S3HttpFixture { - - protected final String sessionToken; - - public S3HttpFixtureWithSessionToken() { - this(true); - } - - public S3HttpFixtureWithSessionToken(boolean enabled) { - this(enabled, "session_token_bucket", "session_token_base_path_integration_tests", "session_token_access_key", "session_token"); - } - - public S3HttpFixtureWithSessionToken(boolean enabled, String bucket, String basePath, String accessKey, String sessionToken) { - super(enabled, bucket, basePath, accessKey); - this.sessionToken = sessionToken; - } - - @Override - protected HttpHandler createHandler() { - final HttpHandler delegate = super.createHandler(); - return exchange -> { - final String securityToken = exchange.getRequestHeaders().getFirst("x-amz-security-token"); - if (securityToken == null) { - sendError(exchange, RestStatus.FORBIDDEN, "AccessDenied", "No session token"); - return; - } - if (securityToken.equals(sessionToken) == false) { - sendError(exchange, RestStatus.FORBIDDEN, "AccessDenied", "Bad session token"); - return; - } - delegate.handle(exchange); - }; - } -} diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java index 56d3454aa5544..bfc0428731c56 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java @@ -28,6 +28,7 @@ import org.elasticsearch.logging.Logger; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.RestUtils; +import org.elasticsearch.test.fixture.HttpHeaderParser; import java.io.IOException; import java.io.InputStreamReader; @@ -269,8 +270,8 @@ public void handle(final HttpExchange exchange) throws IOException { exchange.sendResponseHeaders(RestStatus.NOT_FOUND.getStatus(), -1); return; } - final String range = exchange.getRequestHeaders().getFirst("Range"); - if (range == null) { + final String rangeHeader = exchange.getRequestHeaders().getFirst("Range"); + if (rangeHeader == null) { exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), blob.length()); blob.writeTo(exchange.getResponseBody()); @@ -281,17 +282,12 @@ public void handle(final HttpExchange exchange) throws IOException { // requests with a header value like "Range: bytes=start-end" where both {@code start} and {@code end} are always defined // (sometimes to very high value for {@code end}). It would be too tedious to fully support the RFC so S3HttpHandler only // supports when both {@code start} and {@code end} are defined to match the SDK behavior. 
- final Matcher matcher = Pattern.compile("^bytes=([0-9]+)-([0-9]+)$").matcher(range); - if (matcher.matches() == false) { - throw new AssertionError("Bytes range does not match expected pattern: " + range); - } - var groupStart = matcher.group(1); - var groupEnd = matcher.group(2); - if (groupStart == null || groupEnd == null) { - throw new AssertionError("Bytes range does not match expected pattern: " + range); + final HttpHeaderParser.Range range = HttpHeaderParser.parseRangeHeader(rangeHeader); + if (range == null) { + throw new AssertionError("Bytes range does not match expected pattern: " + rangeHeader); } - long start = Long.parseLong(groupStart); - long end = Long.parseLong(groupEnd); + long start = range.start(); + long end = range.end(); if (end < start) { exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), blob.length()); diff --git a/test/fixtures/s3-fixture/src/test/java/fixture/s3/S3HttpHandlerTests.java b/test/fixtures/s3-fixture/src/test/java/fixture/s3/S3HttpHandlerTests.java index 375f428f748e6..58f32292fa91c 100644 --- a/test/fixtures/s3-fixture/src/test/java/fixture/s3/S3HttpHandlerTests.java +++ b/test/fixtures/s3-fixture/src/test/java/fixture/s3/S3HttpHandlerTests.java @@ -31,6 +31,8 @@ import java.util.List; import java.util.Objects; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.greaterThan; public class S3HttpHandlerTests extends ESTestCase { @@ -261,7 +263,7 @@ public void testListAndAbortMultipartUpload() { path/blob10000false\ """), handleRequest(handler, "GET", "/bucket/?uploads&prefix=path/blob")); - assertEquals(RestStatus.NOT_FOUND, handleRequest(handler, "POST", "/bucket/path/blob?uploadId=" + uploadId, Strings.format(""" + final var completeUploadResponse = handleRequest(handler, "POST", "/bucket/path/blob?uploadId=" + uploadId, Strings.format(""" @@ -272,7 +274,13 @@ public void testListAndAbortMultipartUpload() { %s 2 - """, part1Etag, part2Etag)).status()); + """, part1Etag, part2Etag)); + if (completeUploadResponse.status() == RestStatus.OK) { + // possible, but rare, indicating that S3 started processing the upload before returning an error + assertThat(completeUploadResponse.body().utf8ToString(), allOf(containsString(""), containsString("NoSuchUpload"))); + } else { + assertEquals(RestStatus.NOT_FOUND, completeUploadResponse.status()); + } } private static String getUploadId(BytesReference createUploadResponseBody) { diff --git a/test/fixtures/url-fixture/src/main/java/fixture/url/URLFixture.java b/test/fixtures/url-fixture/src/main/java/fixture/url/URLFixture.java index 4c3159fc3c849..860f6ff141689 100644 --- a/test/fixtures/url-fixture/src/main/java/fixture/url/URLFixture.java +++ b/test/fixtures/url-fixture/src/main/java/fixture/url/URLFixture.java @@ -10,6 +10,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.fixture.AbstractHttpFixture; +import org.elasticsearch.test.fixture.HttpHeaderParser; import org.junit.rules.TemporaryFolder; import org.junit.rules.TestRule; @@ -21,15 +22,12 @@ import java.nio.file.Path; import java.util.HashMap; import java.util.Map; -import java.util.regex.Matcher; -import java.util.regex.Pattern; /** * This {@link URLFixture} exposes a filesystem directory over HTTP. It is used in repository-url * integration tests to expose a directory created by a regular FS repository. 
*/ public class URLFixture extends AbstractHttpFixture implements TestRule { - private static final Pattern RANGE_PATTERN = Pattern.compile("bytes=(\\d+)-(\\d+)$"); private final TemporaryFolder temporaryFolder; private Path repositoryDir; @@ -60,19 +58,19 @@ private AbstractHttpFixture.Response handleGetRequest(Request request) throws IO if (normalizedPath.startsWith(normalizedRepositoryDir)) { if (Files.exists(normalizedPath) && Files.isReadable(normalizedPath) && Files.isRegularFile(normalizedPath)) { - final String range = request.getHeader("Range"); + final String rangeHeader = request.getHeader("Range"); final Map headers = new HashMap<>(contentType("application/octet-stream")); - if (range == null) { + if (rangeHeader == null) { byte[] content = Files.readAllBytes(normalizedPath); headers.put("Content-Length", String.valueOf(content.length)); return new Response(RestStatus.OK.getStatus(), headers, content); } else { - final Matcher matcher = RANGE_PATTERN.matcher(range); - if (matcher.matches() == false) { + final HttpHeaderParser.Range range = HttpHeaderParser.parseRangeHeader(rangeHeader); + if (range == null) { return new Response(RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus(), TEXT_PLAIN_CONTENT_TYPE, EMPTY_BYTE); } else { - long start = Long.parseLong(matcher.group(1)); - long end = Long.parseLong(matcher.group(2)); + long start = range.start(); + long end = range.end(); long rangeLength = end - start + 1; final long fileSize = Files.size(normalizedPath); if (start >= fileSize || start > end || rangeLength > fileSize) { diff --git a/test/framework/build.gradle b/test/framework/build.gradle index f130ecf131848..126b95041da11 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -6,7 +6,6 @@ * your election, the "Elastic License 2.0", the "GNU Affero General Public * License v3.0 only", or the "Server Side Public License, v 1". 
*/ -import org.elasticsearch.gradle.internal.info.BuildParams; apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.publish' @@ -85,9 +84,9 @@ tasks.named("thirdPartyAudit").configure { } tasks.named("test").configure { - systemProperty 'tests.gradle_index_compat_versions', BuildParams.bwcVersions.indexCompatible.join(',') - systemProperty 'tests.gradle_wire_compat_versions', BuildParams.bwcVersions.wireCompatible.join(',') - systemProperty 'tests.gradle_unreleased_versions', BuildParams.bwcVersions.unreleased.join(',') + systemProperty 'tests.gradle_index_compat_versions', buildParams.bwcVersions.indexCompatible.join(',') + systemProperty 'tests.gradle_wire_compat_versions', buildParams.bwcVersions.wireCompatible.join(',') + systemProperty 'tests.gradle_unreleased_versions', buildParams.bwcVersions.unreleased.join(',') } tasks.register("integTest", Test) { diff --git a/test/framework/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/StoredScriptIntegTestUtils.java b/test/framework/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/StoredScriptIntegTestUtils.java index 5f979d75ec382..0a090af431dae 100644 --- a/test/framework/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/StoredScriptIntegTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/StoredScriptIntegTestUtils.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.script.StoredScriptSource; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentType; @@ -25,11 +26,22 @@ public static void putJsonStoredScript(String id, String jsonContent) { } public static void putJsonStoredScript(String id, BytesReference jsonContent) { - assertAcked( - ESIntegTestCase.safeExecute( - TransportPutStoredScriptAction.TYPE, - new PutStoredScriptRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).id(id).content(jsonContent, XContentType.JSON) - ) + assertAcked(ESIntegTestCase.safeExecute(TransportPutStoredScriptAction.TYPE, newPutStoredScriptTestRequest(id, jsonContent))); + } + + public static PutStoredScriptRequest newPutStoredScriptTestRequest(String id, String jsonContent) { + return newPutStoredScriptTestRequest(id, new BytesArray(jsonContent)); + } + + public static PutStoredScriptRequest newPutStoredScriptTestRequest(String id, BytesReference jsonContent) { + return new PutStoredScriptRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + id, + null, + jsonContent, + XContentType.JSON, + StoredScriptSource.parse(jsonContent, XContentType.JSON) ); } } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java index a041efc9ad3f1..75cd6da44724d 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java @@ -19,12 +19,12 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingNodesHelper; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import 
org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedShard; -import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; @@ -438,11 +438,13 @@ public void allocateUnassigned( } protected static final NodeAllocationStatsProvider EMPTY_NODE_ALLOCATION_STATS = new NodeAllocationStatsProvider( - WriteLoadForecaster.DEFAULT + WriteLoadForecaster.DEFAULT, + createBuiltInClusterSettings() ) { @Override - public Map stats( - ClusterState clusterState, + public Map stats( + Metadata metadata, + RoutingNodes routingNodes, ClusterInfo clusterInfo, @Nullable DesiredBalance desiredBalance ) { diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index 5ca52024e82f6..add110de35a0b 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -157,23 +157,14 @@ public static DataStream newInstance( .build(); } - public static String getLegacyDefaultBackingIndexName( - String dataStreamName, - long generation, - long epochMillis, - boolean isNewIndexNameFormat - ) { - if (isNewIndexNameFormat) { - return String.format( - Locale.ROOT, - BACKING_INDEX_PREFIX + "%s-%s-%06d", - dataStreamName, - DATE_FORMATTER.formatMillis(epochMillis), - generation - ); - } else { - return getLegacyDefaultBackingIndexName(dataStreamName, generation); - } + public static String getLegacyDefaultBackingIndexName(String dataStreamName, long generation, long epochMillis) { + return String.format( + Locale.ROOT, + BACKING_INDEX_PREFIX + "%s-%s-%06d", + dataStreamName, + DATE_FORMATTER.formatMillis(epochMillis), + generation + ); } public static String getLegacyDefaultBackingIndexName(String dataStreamName, long generation) { diff --git a/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java b/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java index 2e8dc287a4c40..fe1b08d5e738d 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java @@ -9,7 +9,6 @@ package org.elasticsearch.index; -import org.apache.lucene.util.Accountable; import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; @@ -20,7 +19,6 @@ import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperRegistry; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.script.ScriptCompiler; @@ -62,13 +60,7 @@ public static MapperService newMapperService( IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexName, finalSettings); IndexAnalyzers indexAnalyzers = createTestAnalysis(indexSettings, finalSettings).indexAnalyzers; SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap()); - 
BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); return new MapperService( () -> TransportVersion.current(), indexSettings, diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index bf47efcad7b53..66d87f3532cbd 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -19,7 +19,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; -import org.apache.lucene.util.Accountable; import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; @@ -283,13 +282,7 @@ public MapperService build() { getPlugins().stream().filter(p -> p instanceof MapperPlugin).map(p -> (MapperPlugin) p).collect(toList()) ).getMapperRegistry(); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); var mapperService = new MapperService( () -> TransportVersion.current(), @@ -762,17 +755,11 @@ protected SearchExecutionContext createSearchExecutionContext(MapperService mapp IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); final SimilarityService similarityService = new SimilarityService(indexSettings, null, Map.of()); final long nowInMillis = randomNonNegativeLong(); - return new SearchExecutionContext(0, 0, indexSettings, new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) { - - } - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) { - - } - }), + return new SearchExecutionContext( + 0, + 0, + indexSettings, + new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP), (ft, fdc) -> ft.fielddataBuilder(fdc).build(new IndexFieldDataCache.None(), new NoneCircuitBreakerService()), mapperService, mapperService.mappingLookup(), diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java index e538a9955d9b6..449ecc099412f 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.MapperService.MergeReason; import 
org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentBuilder; @@ -39,7 +38,7 @@ protected boolean isSupportedOn(IndexVersion version) { protected abstract void registerParameters(ParameterChecker checker) throws IOException; - private record ConflictCheck(XContentBuilder init, XContentBuilder update) {} + private record ConflictCheck(XContentBuilder init, XContentBuilder update, Consumer check) {} private record UpdateCheck(XContentBuilder init, XContentBuilder update, Consumer check) {} @@ -59,7 +58,7 @@ public void registerConflictCheck(String param, CheckedConsumer {})); } /** @@ -69,8 +68,8 @@ public void registerConflictCheck(String param, CheckedConsumer check) { + conflictChecks.put(param, new ConflictCheck(init, update, check)); } public void registerUpdateCheck(XContentBuilder init, XContentBuilder update, Consumer check) { @@ -96,6 +95,7 @@ public final void testUpdates() throws IOException { e.getMessage(), anyOf(containsString("Cannot update parameter [" + param + "]"), containsString("different [" + param + "]")) ); + checker.conflictChecks.get(param).check.accept(mapperService.documentMapper()); } for (UpdateCheck updateCheck : checker.updateChecks) { MapperService mapperService = createMapperService(updateCheck.init); @@ -142,52 +142,4 @@ public final void testFixedMetaFieldsAreNotConfigurable() throws IOException { ); assertEquals("Failed to parse mapping: " + fieldName() + " is not configurable", exception.getMessage()); } - - public void testTypeAndFriendsAreSilentlyIgnoredBefore_8_6_0() throws IOException { - assumeTrue("Metadata field " + fieldName() + " isn't configurable", isConfigurable()); - IndexVersion previousVersion = IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_6_0); - IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, previousVersion); - assumeTrue("Metadata field " + fieldName() + " is not supported on version " + version, isSupportedOn(version)); - MapperService mapperService = createMapperService(version, mapping(b -> {})); - // these parameters were previously silently ignored, they will still be ignored in existing indices - String[] unsupportedParameters = new String[] { "fields", "copy_to", "boost", "type" }; - for (String param : unsupportedParameters) { - String mappingAsString = "{\n" - + " \"_doc\" : {\n" - + " \"" - + fieldName() - + "\" : {\n" - + " \"" - + param - + "\" : \"any\"\n" - + " }\n" - + " }\n" - + "}"; - assertNotNull(mapperService.parseMapping("_doc", MergeReason.MAPPING_UPDATE, new CompressedXContent(mappingAsString))); - } - } - - public void testTypeAndFriendsAreDeprecatedFrom_8_6_0() throws IOException { - assumeTrue("Metadata field " + fieldName() + " isn't configurable", isConfigurable()); - IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_6_0, IndexVersion.current()); - assumeTrue("Metadata field " + fieldName() + " is not supported on version " + version, isSupportedOn(version)); - MapperService mapperService = createMapperService(version, mapping(b -> {})); - // these parameters were previously silently ignored, they are now deprecated in new indices - String[] unsupportedParameters = new String[] { "fields", "copy_to", "boost", "type" }; - for (String param : unsupportedParameters) { - String mappingAsString = "{\n" - + " \"_doc\" : {\n" - + " \"" - + fieldName() - + "\" : {\n" - + " \"" - + param - + "\" : \"any\"\n" - + " }\n" - + " }\n" - + "}"; - 
assertNotNull(mapperService.parseMapping("_doc", MergeReason.MAPPING_UPDATE, new CompressedXContent(mappingAsString))); - assertWarnings("Parameter [" + param + "] has no effect on metadata field [" + fieldName() + "] and will be removed in future"); - } - } } diff --git a/test/framework/src/main/java/org/elasticsearch/ingest/IngestPipelineTestUtils.java b/test/framework/src/main/java/org/elasticsearch/ingest/IngestPipelineTestUtils.java index 8fd3c61d4c9da..9888b1eb661ff 100644 --- a/test/framework/src/main/java/org/elasticsearch/ingest/IngestPipelineTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/ingest/IngestPipelineTestUtils.java @@ -14,11 +14,13 @@ import org.elasticsearch.action.ingest.DeletePipelineTransportAction; import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.ingest.PutPipelineTransportAction; +import org.elasticsearch.action.ingest.SimulatePipelineRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.test.ESTestCase; @@ -124,4 +126,18 @@ public void onFailure(Exception e) { ); } } + + /** + * Construct a new {@link SimulatePipelineRequest} whose content is the given JSON document, represented as a {@link String}. + */ + public static SimulatePipelineRequest jsonSimulatePipelineRequest(String jsonString) { + return jsonSimulatePipelineRequest(new BytesArray(jsonString)); + } + + /** + * Construct a new {@link SimulatePipelineRequest} whose content is the given JSON document, represented as a {@link BytesReference}. 
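For reference, a minimal usage sketch of the jsonSimulatePipelineRequest helpers introduced in this hunk (the surrounding test method and the pipeline JSON are illustrative, not part of this change):

    import org.elasticsearch.action.ingest.SimulatePipelineRequest;
    import static org.elasticsearch.ingest.IngestPipelineTestUtils.jsonSimulatePipelineRequest;

    // Build a SimulatePipelineRequest from an inline JSON document; per the helper
    // defined above, the bytes are wrapped in a ReleasableBytesReference and tagged as JSON.
    static SimulatePipelineRequest exampleSimulateRequest() {
        return jsonSimulatePipelineRequest("""
            {"pipeline":{"processors":[]},"docs":[{"_source":{"message":"hello"}}]}
            """);
    }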
+ */ + public static SimulatePipelineRequest jsonSimulatePipelineRequest(BytesReference jsonBytes) { + return new SimulatePipelineRequest(ReleasableBytesReference.wrap(jsonBytes), XContentType.JSON); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java index 40fb4f91c77d0..d3bfacdf7691a 100644 --- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java +++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java @@ -31,6 +31,7 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.MockPluginsService; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsLoader; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.readiness.MockReadinessService; import org.elasticsearch.readiness.ReadinessService; @@ -41,7 +42,6 @@ import org.elasticsearch.search.MockSearchService; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.fetch.FetchPhase; -import org.elasticsearch.search.rank.feature.RankFeatureShardPhase; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; @@ -99,9 +99,7 @@ SearchService newSearchService( ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays, - RankFeatureShardPhase rankFeatureShardPhase, FetchPhase fetchPhase, - ResponseCollectorService responseCollectorService, CircuitBreakerService circuitBreakerService, ExecutorSelector executorSelector, Tracer tracer @@ -114,9 +112,7 @@ SearchService newSearchService( threadPool, scriptService, bigArrays, - rankFeatureShardPhase, fetchPhase, - responseCollectorService, circuitBreakerService, executorSelector, tracer @@ -128,9 +124,7 @@ SearchService newSearchService( threadPool, scriptService, bigArrays, - rankFeatureShardPhase, fetchPhase, - responseCollectorService, circuitBreakerService, executorSelector, tracer @@ -279,10 +273,11 @@ private MockNode( final Collection> classpathPlugins, final boolean forbidPrivateIndexSettings ) { - super(NodeConstruction.prepareConstruction(environment, new MockServiceProvider() { + super(NodeConstruction.prepareConstruction(environment, null, new MockServiceProvider() { + @Override - PluginsService newPluginService(Environment environment, Settings settings) { - return new MockPluginsService(settings, environment, classpathPlugins); + PluginsService newPluginService(Environment environment, PluginsLoader pluginsLoader) { + return new MockPluginsService(environment.settings(), environment, classpathPlugins); } }, forbidPrivateIndexSettings)); diff --git a/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java b/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java index e4734f9cf021e..a9a825af3b865 100644 --- a/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java +++ b/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java @@ -16,11 +16,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.env.Environment; -import org.elasticsearch.jdk.ModuleQualifiedExportsService; import org.elasticsearch.plugins.spi.SPIClassIterator; import java.lang.reflect.Constructor; -import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ 
-44,14 +42,16 @@ public class MockPluginsService extends PluginsService { * @param classpathPlugins Plugins that exist in the classpath which should be loaded */ public MockPluginsService(Settings settings, Environment environment, Collection> classpathPlugins) { - super(settings, environment.configFile(), environment.modulesFile(), environment.pluginsFile()); - - final Path configPath = environment.configFile(); + super( + settings, + environment.configFile(), + new PluginsLoader(Collections.emptyList(), Collections.emptyList(), Collections.emptyMap()) + ); List pluginsLoaded = new ArrayList<>(); for (Class pluginClass : classpathPlugins) { - Plugin plugin = loadPlugin(pluginClass, settings, configPath); + Plugin plugin = loadPlugin(pluginClass, settings, environment.configFile()); PluginDescriptor pluginInfo = new PluginDescriptor( pluginClass.getName(), "classpath plugin", @@ -69,7 +69,7 @@ public MockPluginsService(Settings settings, Environment environment, Collection if (logger.isTraceEnabled()) { logger.trace("plugin loaded from classpath [{}]", pluginInfo); } - pluginsLoaded.add(new LoadedPlugin(pluginInfo, plugin, pluginClass.getClassLoader(), ModuleLayer.boot())); + pluginsLoaded.add(new LoadedPlugin(pluginInfo, plugin, MockPluginsService.class.getClassLoader())); } loadExtensions(pluginsLoaded); this.classpathPlugins = List.copyOf(pluginsLoaded); @@ -169,9 +169,4 @@ private static List createExtensions( } return extensions; } - - @Override - protected void addServerExportsService(Map> qualifiedExports) { - // tests don't run modular - } } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java index 12094b31a049d..17768c54b2eaf 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java @@ -23,9 +23,9 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.Tuple; import org.elasticsearch.mocksocket.MockHttpServer; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.fixture.HttpHeaderParser; import org.junit.After; import org.junit.Before; @@ -40,8 +40,6 @@ import java.util.OptionalInt; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import java.util.regex.Matcher; -import java.util.regex.Pattern; import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose; import static org.elasticsearch.test.NeverMatcher.never; @@ -371,28 +369,24 @@ protected static byte[] randomBlobContent(int minSize) { return randomByteArrayOfLength(randomIntBetween(minSize, frequently() ? 
512 : 1 << 20)); // rarely up to 1mb } - private static final Pattern RANGE_PATTERN = Pattern.compile("^bytes=([0-9]+)-([0-9]+)$"); - - protected static Tuple getRange(HttpExchange exchange) { + protected static HttpHeaderParser.Range getRange(HttpExchange exchange) { final String rangeHeader = exchange.getRequestHeaders().getFirst("Range"); if (rangeHeader == null) { - return Tuple.tuple(0L, MAX_RANGE_VAL); + return new HttpHeaderParser.Range(0L, MAX_RANGE_VAL); } - final Matcher matcher = RANGE_PATTERN.matcher(rangeHeader); - assertTrue(rangeHeader + " matches expected pattern", matcher.matches()); - long rangeStart = Long.parseLong(matcher.group(1)); - long rangeEnd = Long.parseLong(matcher.group(2)); - assertThat(rangeStart, lessThanOrEqualTo(rangeEnd)); - return Tuple.tuple(rangeStart, rangeEnd); + final HttpHeaderParser.Range range = HttpHeaderParser.parseRangeHeader(rangeHeader); + assertNotNull(rangeHeader + " matches expected pattern", range); + assertThat(range.start(), lessThanOrEqualTo(range.end())); + return range; } protected static int getRangeStart(HttpExchange exchange) { - return Math.toIntExact(getRange(exchange).v1()); + return Math.toIntExact(getRange(exchange).start()); } protected static OptionalInt getRangeEnd(HttpExchange exchange) { - final long rangeEnd = getRange(exchange).v2(); + final long rangeEnd = getRange(exchange).end(); if (rangeEnd == MAX_RANGE_VAL) { return OptionalInt.empty(); } diff --git a/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java index 778a6e3106f49..79c61cacb58eb 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java +++ b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java @@ -17,14 +17,12 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.node.MockNode; -import org.elasticsearch.node.ResponseCollectorService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchRequest; -import org.elasticsearch.search.rank.feature.RankFeatureShardPhase; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; @@ -83,9 +81,7 @@ public MockSearchService( ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays, - RankFeatureShardPhase rankFeatureShardPhase, FetchPhase fetchPhase, - ResponseCollectorService responseCollectorService, CircuitBreakerService circuitBreakerService, ExecutorSelector executorSelector, Tracer tracer @@ -96,9 +92,7 @@ public MockSearchService( threadPool, scriptService, bigArrays, - rankFeatureShardPhase, fetchPhase, - responseCollectorService, circuitBreakerService, executorSelector, tracer diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 7cd2e6e1cc82e..51f66418bb44b 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -48,12 +48,13 @@ import 
org.apache.lucene.tests.index.AssertingDirectoryReader; import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.util.LuceneTestCase; -import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.packed.PackedInts; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; +import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.TriConsumer; @@ -144,8 +145,10 @@ import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.search.fetch.subphase.FetchDocValuesPhase; import org.elasticsearch.search.fetch.subphase.FetchSourcePhase; +import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.internal.SubSearchContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalAggregationTestCase; @@ -366,13 +369,7 @@ private AggregationContext createAggregationContext( context.fielddataOperation() ) ).build(new IndexFieldDataCache.None(), breakerService); - BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetFilterCache.Listener() { - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - }); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(indexSettings, BitsetFilterCache.Listener.NOOP); SearchExecutionContext searchExecutionContext = new SearchExecutionContext( 0, -1, @@ -473,7 +470,18 @@ private SubSearchContext buildSubSearchContext( .when(subContext) .getNestedDocuments(); when(ctx.getSearchExecutionContext()).thenReturn(subContext); - + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + new SearchRequest().allowPartialSearchResults(randomBoolean()), + new ShardId("index", "indexUUID", 0), + 0, + 1, + AliasFilter.EMPTY, + 1f, + 0L, + null + ); + when(ctx.request()).thenReturn(request); IndexShard indexShard = mock(IndexShard.class); when(indexShard.shardId()).thenReturn(new ShardId("test", "test", 0)); when(indexShard.indexSettings()).thenReturn(indexSettings); diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index ef6600032ca1b..bdf323afb8d96 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -13,7 +13,6 @@ import com.carrotsearch.randomizedtesting.SeedUtils; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.util.Accountable; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.MockResolvedIndices; import org.elasticsearch.action.OriginalIndices; @@ -58,7 +57,6 @@ import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.shard.IndexLongFieldRange; -import org.elasticsearch.index.shard.ShardId; import 
org.elasticsearch.index.shard.ShardLongFieldRange; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.DateFieldRangeInfo; @@ -486,13 +484,7 @@ private static class ServiceHolder implements Closeable { IndexAnalyzers indexAnalyzers = analysisModule.getAnalysisRegistry().build(IndexCreationContext.CREATE_INDEX, idxSettings); scriptService = new MockScriptService(Settings.EMPTY, scriptModule.engines, scriptModule.contexts); similarityService = new SimilarityService(idxSettings, null, Collections.emptyMap()); - this.bitsetFilterCache = new BitsetFilterCache(idxSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) {} - - @Override - public void onRemoval(ShardId shardId, Accountable accountable) {} - }); + this.bitsetFilterCache = new BitsetFilterCache(idxSettings, BitsetFilterCache.Listener.NOOP); MapperRegistry mapperRegistry = indicesModule.getMapperRegistry(); mapperService = new MapperService( clusterService, diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractMultiClustersTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractMultiClustersTestCase.java index 7b18cf575f190..ea82c9d21ab89 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractMultiClustersTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractMultiClustersTestCase.java @@ -17,6 +17,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Strings; import org.elasticsearch.plugins.Plugin; @@ -44,6 +45,7 @@ import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING; import static org.elasticsearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.not; @@ -149,19 +151,23 @@ public static void stopClusters() throws IOException { } protected void disconnectFromRemoteClusters() throws Exception { - Settings.Builder settings = Settings.builder(); final Set clusterAliases = clusterGroup.clusterAliases(); for (String clusterAlias : clusterAliases) { if (clusterAlias.equals(LOCAL_CLUSTER) == false) { - settings.putNull("cluster.remote." + clusterAlias + ".seeds"); - settings.putNull("cluster.remote." + clusterAlias + ".mode"); - settings.putNull("cluster.remote." + clusterAlias + ".proxy_address"); + removeRemoteCluster(clusterAlias); } } + } + + protected void removeRemoteCluster(String clusterAlias) throws Exception { + Settings.Builder settings = Settings.builder(); + settings.putNull("cluster.remote." + clusterAlias + ".seeds"); + settings.putNull("cluster.remote." + clusterAlias + ".mode"); + settings.putNull("cluster.remote." 
+ clusterAlias + ".proxy_address"); client().admin().cluster().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setPersistentSettings(settings).get(); assertBusy(() -> { for (TransportService transportService : cluster(LOCAL_CLUSTER).getInstances(TransportService.class)) { - assertThat(transportService.getRemoteClusterService().getRegisteredRemoteClusterNames(), empty()); + assertThat(transportService.getRemoteClusterService().getRegisteredRemoteClusterNames(), not(contains(clusterAlias))); } }); } @@ -178,12 +184,17 @@ protected void configureAndConnectsToRemoteClusters() throws Exception { } protected void configureRemoteCluster(String clusterAlias, Collection seedNodes) throws Exception { - final String remoteClusterSettingPrefix = "cluster.remote." + clusterAlias + "."; - Settings.Builder settings = Settings.builder(); - final List seedAddresses = seedNodes.stream().map(node -> { + final var seedAddresses = seedNodes.stream().map(node -> { final TransportService transportService = cluster(clusterAlias).getInstance(TransportService.class, node); - return transportService.boundAddress().publishAddress().toString(); + return transportService.boundAddress().publishAddress(); }).toList(); + configureRemoteClusterWithSeedAddresses(clusterAlias, seedAddresses); + } + + protected void configureRemoteClusterWithSeedAddresses(String clusterAlias, Collection seedNodes) throws Exception { + final String remoteClusterSettingPrefix = "cluster.remote." + clusterAlias + "."; + Settings.Builder settings = Settings.builder(); + final List seedAddresses = seedNodes.stream().map(TransportAddress::toString).toList(); boolean skipUnavailable = skipUnavailableForRemoteClusters().containsKey(clusterAlias) ? skipUnavailableForRemoteClusters().get(clusterAlias) : DEFAULT_SKIP_UNAVAILABLE; diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index d7c5c598ce978..af92eae8c8a19 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -281,7 +281,7 @@ public abstract class ESIntegTestCase extends ESTestCase { /** * Annotation for third-party integration tests. *

- * These are tests the require a third-party service in order to run. They + * These are tests, which require a third-party service in order to run. They * may require the user to manually configure an external process (such as rabbitmq), * or may additionally require some external configuration (e.g. AWS credentials) * via the {@code tests.config} system property. diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 207409dfcf751..d983fc854bdfd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -1358,6 +1358,13 @@ public static String randomDateFormatterPattern() { return randomFrom(FormatNames.values()).getName(); } + /** + * Generate a random string of at least 112 bits to satisfy minimum entropy requirement when running in FIPS mode. + */ + public static String randomSecretKey() { + return randomAlphaOfLengthBetween(14, 20); + } + /** * Randomly choose between {@link EsExecutors#DIRECT_EXECUTOR_SERVICE} (which does not fork), {@link ThreadPool#generic}, and one of the * other named threadpool executors. @@ -1704,7 +1711,23 @@ protected final BytesReference toShuffledXContent( boolean humanReadable, String... exceptFieldNames ) throws IOException { - BytesReference bytes = XContentHelper.toXContent(toXContent, xContentType, params, humanReadable); + return toShuffledXContent(toXContent, xContentType, RestApiVersion.current(), params, humanReadable, exceptFieldNames); + } + + /** + * Returns the bytes that represent the XContent output of the provided {@link ToXContent} object, using the provided + * {@link XContentType}. Wraps the output into a new anonymous object according to the value returned + * by the {@link ToXContent#isFragment()} method returns. Shuffles the keys to make sure that parsing never relies on keys ordering. + */ + protected final BytesReference toShuffledXContent( + ToXContent toXContent, + XContentType xContentType, + RestApiVersion restApiVersion, + ToXContent.Params params, + boolean humanReadable, + String... exceptFieldNames + ) throws IOException { + BytesReference bytes = XContentHelper.toXContent(toXContent, xContentType, restApiVersion, params, humanReadable); try (XContentParser parser = createParser(xContentType.xContent(), bytes)) { try (XContentBuilder builder = shuffleXContent(parser, rarely(), exceptFieldNames)) { return BytesReference.bytes(builder); @@ -2315,10 +2338,18 @@ public static void safeAwait(CyclicBarrier barrier) { * flag and asserting that the latch is indeed completed before the timeout. */ public static void safeAwait(CountDownLatch countDownLatch) { + safeAwait(countDownLatch, SAFE_AWAIT_TIMEOUT); + } + + /** + * Await on the given {@link CountDownLatch} with a supplied timeout, preserving the thread's interrupt status + * flag and asserting that the latch is indeed completed before the timeout. 
+ */ + public static void safeAwait(CountDownLatch countDownLatch, TimeValue timeout) { try { assertTrue( "safeAwait: CountDownLatch did not reach zero within the timeout", - countDownLatch.await(SAFE_AWAIT_TIMEOUT.millis(), TimeUnit.MILLISECONDS) + countDownLatch.await(timeout.millis(), TimeUnit.MILLISECONDS) ); } catch (InterruptedException e) { Thread.currentThread().interrupt(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/fixture/HttpHeaderParser.java b/test/framework/src/main/java/org/elasticsearch/test/fixture/HttpHeaderParser.java new file mode 100644 index 0000000000000..7018e5e259584 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/fixture/HttpHeaderParser.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.test.fixture; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public enum HttpHeaderParser { + ; + + private static final Pattern RANGE_HEADER_PATTERN = Pattern.compile("bytes=([0-9]+)-([0-9]+)"); + + /** + * Parse a "Range" header + * + * Note: only a single bounded range is supported (e.g. Range: bytes={range_start}-{range_end}) + * + * @see MDN: Range header + * @param rangeHeaderValue The header value as a string + * @return a {@link Range} instance representing the parsed value, or null if the header is malformed + */ + public static Range parseRangeHeader(String rangeHeaderValue) { + final Matcher matcher = RANGE_HEADER_PATTERN.matcher(rangeHeaderValue); + if (matcher.matches()) { + try { + return new Range(Long.parseLong(matcher.group(1)), Long.parseLong(matcher.group(2))); + } catch (NumberFormatException e) { + return null; + } + } + return null; + } + + public record Range(long start, long end) {} +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 5851fc709d14a..6c501898d5fe1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -68,6 +68,7 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Consumer; @@ -304,6 +305,10 @@ public static void assertHitCount(SearchRequestBuilder searchRequestBuilder, lon assertResponse(searchRequestBuilder, res -> assertHitCount(res, expectedHitCount)); } + public static void assertHitCount(long expectedHitCount, SearchRequestBuilder... 
searchRequestBuilders) { + assertResponses(res -> assertHitCount(res, expectedHitCount), searchRequestBuilders); + } + public static void assertHitCount(ActionFuture responseFuture, long expectedHitCount) { try { assertResponse(responseFuture, res -> assertHitCount(res, expectedHitCount)); @@ -375,6 +380,37 @@ public static void assertNoFailuresAndResponse(ActionFuture resp } } + /** + * Same as {@link #assertResponse(RequestBuilder, Consumer)} but runs the same assertion on multiple requests that are started + * concurrently. + */ + @SafeVarargs + public static void assertResponses( + Consumer consumer, + RequestBuilder... searchRequestBuilder + ) { + List> futures = new ArrayList<>(searchRequestBuilder.length); + for (RequestBuilder builder : searchRequestBuilder) { + futures.add(builder.execute()); + } + Throwable tr = null; + for (Future f : futures) { + try { + var res = f.get(); + try { + consumer.accept(res); + } finally { + res.decRef(); + } + } catch (Throwable t) { + tr = ExceptionsHelper.useOrSuppress(tr, t); + } + } + if (tr != null) { + throw new AssertionError(tr); + } + } + public static void assertResponse( RequestBuilder searchRequestBuilder, Consumer consumer diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 28c9905386091..bdef0ba631b72 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -63,12 +63,12 @@ import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.UpdateForV9; -import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.health.node.selection.HealthNode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.rest.RestStatus; @@ -396,30 +396,11 @@ public void initClient() throws IOException { assert nodesVersions != null; } - /** - * Override to provide additional test-only historical features. - * - * Note: This extension point cannot be used to add cluster features. The provided {@link FeatureSpecification}s - * must contain only historical features, otherwise an assertion error is thrown. - */ - protected List additionalTestOnlyHistoricalFeatures() { - return List.of(); - } - protected final TestFeatureService createTestFeatureService( Map> clusterStateFeatures, Set semanticNodeVersions ) { - // Historical features information is unavailable when using legacy test plugins - if (ESRestTestFeatureService.hasFeatureMetadata() == false) { - logger.warn( - "This test is running on the legacy test framework; historical features from production code will not be available. " - + "You need to port the test to the new test plugins in order to use historical features from production code. 
" - + "If this is a legacy feature used only in tests, you can add it to a test-only FeatureSpecification such as {}.", - RestTestLegacyFeatures.class.getCanonicalName() - ); - } - return new ESRestTestFeatureService(additionalTestOnlyHistoricalFeatures(), semanticNodeVersions, clusterStateFeatures.values()); + return new ESRestTestFeatureService(semanticNodeVersions, clusterStateFeatures.values()); } protected static boolean has(ProductFeature feature) { @@ -719,10 +700,6 @@ protected boolean preserveTemplatesUponCompletion() { * all feature states, deleting system indices, system associated indices, and system data streams. */ protected boolean resetFeatureStates() { - // ML reset fails when ML is disabled in versions before 8.7 - if (isMlEnabled() == false && clusterHasFeature(RestTestLegacyFeatures.ML_STATE_RESET_FALLBACK_ON_DISABLED) == false) { - return false; - } return true; } @@ -917,50 +894,46 @@ private void wipeCluster() throws Exception { * slows down the test because xpack will just recreate * them. */ - // In case of bwc testing, we need to delete component and composable - // index templates only for clusters that support this historical feature - if (clusterHasFeature(RestTestLegacyFeatures.COMPONENT_TEMPLATE_SUPPORTED)) { - try { - Request getTemplatesRequest = new Request("GET", "_index_template"); - Map composableIndexTemplates = XContentHelper.convertToMap( - JsonXContent.jsonXContent, - EntityUtils.toString(adminClient().performRequest(getTemplatesRequest).getEntity()), - false - ); - List names = ((List) composableIndexTemplates.get("index_templates")).stream() - .map(ct -> (String) ((Map) ct).get("name")) - .filter(name -> isXPackTemplate(name) == false) - .collect(Collectors.toList()); - if (names.isEmpty() == false) { - try { - adminClient().performRequest(new Request("DELETE", "_index_template/" + String.join(",", names))); - } catch (ResponseException e) { - logger.warn(() -> format("unable to remove multiple composable index templates %s", names), e); - } + try { + Request getTemplatesRequest = new Request("GET", "_index_template"); + Map composableIndexTemplates = XContentHelper.convertToMap( + JsonXContent.jsonXContent, + EntityUtils.toString(adminClient().performRequest(getTemplatesRequest).getEntity()), + false + ); + List names = ((List) composableIndexTemplates.get("index_templates")).stream() + .map(ct -> (String) ((Map) ct).get("name")) + .filter(name -> isXPackTemplate(name) == false) + .collect(Collectors.toList()); + if (names.isEmpty() == false) { + try { + adminClient().performRequest(new Request("DELETE", "_index_template/" + String.join(",", names))); + } catch (ResponseException e) { + logger.warn(() -> format("unable to remove multiple composable index templates %s", names), e); } - } catch (Exception e) { - logger.debug("ignoring exception removing all composable index templates", e); - // We hit a version of ES that doesn't support index templates v2 yet, so it's safe to ignore } - try { - Request compReq = new Request("GET", "_component_template"); - String componentTemplates = EntityUtils.toString(adminClient().performRequest(compReq).getEntity()); - Map cTemplates = XContentHelper.convertToMap(JsonXContent.jsonXContent, componentTemplates, false); - List names = ((List) cTemplates.get("component_templates")).stream() - .map(ct -> (String) ((Map) ct).get("name")) - .filter(name -> isXPackTemplate(name) == false) - .collect(Collectors.toList()); - if (names.isEmpty() == false) { - try { - adminClient().performRequest(new Request("DELETE", 
"_component_template/" + String.join(",", names))); - } catch (ResponseException e) { - logger.warn(() -> format("unable to remove multiple component templates %s", names), e); - } + } catch (Exception e) { + logger.debug("ignoring exception removing all composable index templates", e); + // We hit a version of ES that doesn't support index templates v2 yet, so it's safe to ignore + } + try { + Request compReq = new Request("GET", "_component_template"); + String componentTemplates = EntityUtils.toString(adminClient().performRequest(compReq).getEntity()); + Map cTemplates = XContentHelper.convertToMap(JsonXContent.jsonXContent, componentTemplates, false); + List names = ((List) cTemplates.get("component_templates")).stream() + .map(ct -> (String) ((Map) ct).get("name")) + .filter(name -> isXPackTemplate(name) == false) + .collect(Collectors.toList()); + if (names.isEmpty() == false) { + try { + adminClient().performRequest(new Request("DELETE", "_component_template/" + String.join(",", names))); + } catch (ResponseException e) { + logger.warn(() -> format("unable to remove multiple component templates %s", names), e); } - } catch (Exception e) { - logger.debug("ignoring exception removing all component templates", e); - // We hit a version of ES that doesn't support index templates v2 yet, so it's safe to ignore } + } catch (Exception e) { + logger.debug("ignoring exception removing all component templates", e); + // We hit a version of ES that doesn't support index templates v2 yet, so it's safe to ignore } if (has(ProductFeature.LEGACY_TEMPLATES)) { @@ -1058,29 +1031,25 @@ private Set getAllUnexpectedTemplates() throws IOException { Set unexpectedTemplates = new HashSet<>(); if (preserveDataStreamsUponCompletion() == false && preserveTemplatesUponCompletion() == false) { if (has(ProductFeature.XPACK)) { - // In case of bwc testing, we need to delete component and composable - // index templates only for clusters that support this historical feature - if (clusterHasFeature(RestTestLegacyFeatures.COMPONENT_TEMPLATE_SUPPORTED)) { - Request getTemplatesRequest = new Request("GET", "_index_template"); - Map composableIndexTemplates = XContentHelper.convertToMap( - JsonXContent.jsonXContent, - EntityUtils.toString(adminClient().performRequest(getTemplatesRequest).getEntity()), - false - ); - unexpectedTemplates.addAll( - ((List) composableIndexTemplates.get("index_templates")).stream() - .map(ct -> (String) ((Map) ct).get("name")) - .filter(name -> isXPackTemplate(name) == false) - .collect(Collectors.toSet()) - ); - Request compReq = new Request("GET", "_component_template"); - String componentTemplates = EntityUtils.toString(adminClient().performRequest(compReq).getEntity()); - Map cTemplates = XContentHelper.convertToMap(JsonXContent.jsonXContent, componentTemplates, false); - ((List) cTemplates.get("component_templates")).stream() + Request getTemplatesRequest = new Request("GET", "_index_template"); + Map composableIndexTemplates = XContentHelper.convertToMap( + JsonXContent.jsonXContent, + EntityUtils.toString(adminClient().performRequest(getTemplatesRequest).getEntity()), + false + ); + unexpectedTemplates.addAll( + ((List) composableIndexTemplates.get("index_templates")).stream() .map(ct -> (String) ((Map) ct).get("name")) .filter(name -> isXPackTemplate(name) == false) - .forEach(unexpectedTemplates::add); - } + .collect(Collectors.toSet()) + ); + Request compReq = new Request("GET", "_component_template"); + String componentTemplates = 
EntityUtils.toString(adminClient().performRequest(compReq).getEntity()); + Map cTemplates = XContentHelper.convertToMap(JsonXContent.jsonXContent, componentTemplates, false); + ((List) cTemplates.get("component_templates")).stream() + .map(ct -> (String) ((Map) ct).get("name")) + .filter(name -> isXPackTemplate(name) == false) + .forEach(unexpectedTemplates::add); if (has(ProductFeature.LEGACY_TEMPLATES)) { Request getLegacyTemplatesRequest = new Request("GET", "_template"); @@ -1142,6 +1111,7 @@ protected static void wipeAllIndices(boolean preserveSecurityIndices) throws IOE } final Request deleteRequest = new Request("DELETE", Strings.collectionToCommaDelimitedString(indexPatterns)); deleteRequest.addParameter("expand_wildcards", "open,closed,hidden"); + deleteRequest.setOptions(deleteRequest.getOptions().toBuilder().setWarningsHandler(ignoreAsyncSearchWarning()).build()); final Response response = adminClient().performRequest(deleteRequest); try (InputStream is = response.getEntity().getContent()) { assertTrue((boolean) XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true).get("acknowledged")); @@ -1154,6 +1124,30 @@ protected static void wipeAllIndices(boolean preserveSecurityIndices) throws IOE } } + // Make warnings handler that ignores the .async-search warning since .async-search may randomly appear when async requests are slow + // See: https://github.com/elastic/elasticsearch/issues/117099 + protected static WarningsHandler ignoreAsyncSearchWarning() { + return new WarningsHandler() { + @Override + public boolean warningsShouldFailRequest(List warnings) { + if (warnings.isEmpty()) { + return false; + } + return warnings.equals( + List.of( + "this request accesses system indices: [.async-search], " + + "but in a future major version, direct access to system indices will be prevented by default" + ) + ) == false; + } + + @Override + public String toString() { + return "ignore .async-search warning"; + } + }; + } + protected static void wipeDataStreams() throws IOException { try { if (hasXPack()) { @@ -1840,8 +1834,10 @@ public static CreateIndexResponse createIndex(RestClient client, String name, Se if (settings != null && settings.getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) == false) { expectSoftDeletesWarning(request, name); - } - + } else if (isSyntheticSourceConfiguredInMapping(mapping) + && SourceFieldMapper.onOrAfterDeprecateModeVersion(minimumIndexVersion())) { + request.setOptions(expectVersionSpecificWarnings(v -> v.current(SourceFieldMapper.DEPRECATION_WARNING))); + } final Response response = client.performRequest(request); try (var parser = responseAsParser(response)) { return TestResponseParsers.parseCreateIndexResponse(parser); @@ -1885,6 +1881,49 @@ protected static void expectSoftDeletesWarning(Request request, String indexName })); } + @SuppressWarnings("unchecked") + protected static boolean isSyntheticSourceConfiguredInMapping(String mapping) { + if (mapping == null) { + return false; + } + var mappings = XContentHelper.convertToMap( + JsonXContent.jsonXContent, + mapping.trim().startsWith("{") ? 
mapping : '{' + mapping + '}', + false + ); + if (mappings.containsKey("_doc")) { + mappings = (Map) mappings.get("_doc"); + } + Map sourceMapper = (Map) mappings.get(SourceFieldMapper.NAME); + if (sourceMapper == null) { + return false; + } + return sourceMapper.get("mode") != null; + } + + @SuppressWarnings("unchecked") + protected static boolean isSyntheticSourceConfiguredInTemplate(String template) { + if (template == null) { + return false; + } + var values = XContentHelper.convertToMap(JsonXContent.jsonXContent, template, false); + for (Object value : values.values()) { + Map mappings = (Map) ((Map) value).get("mappings"); + if (mappings == null) { + continue; + } + Map sourceMapper = (Map) mappings.get(SourceFieldMapper.NAME); + if (sourceMapper == null) { + continue; + } + Object mode = sourceMapper.get("mode"); + if (mode != null) { + return true; + } + } + return false; + } + protected static Map getIndexSettings(String index) throws IOException { Request request = new Request("GET", "/" + index + "/_settings"); request.addParameter("flat_settings", "true"); @@ -2282,7 +2321,7 @@ protected static Map> getClusterStateFeatures(RestClient adm */ protected static IndexVersion minimumIndexVersion() throws IOException { final Request request = new Request("GET", "_nodes"); - request.addParameter("filter_path", "nodes.*.version,nodes.*.max_index_version"); + request.addParameter("filter_path", "nodes.*.version,nodes.*.max_index_version,nodes.*.index_version"); final Response response = adminClient().performRequest(request); final Map nodes = ObjectPath.createFromResponse(response).evaluate("nodes"); @@ -2290,10 +2329,13 @@ protected static IndexVersion minimumIndexVersion() throws IOException { IndexVersion minVersion = null; for (Map.Entry node : nodes.entrySet()) { Map nodeData = (Map) node.getValue(); - String versionStr = (String) nodeData.get("max_index_version"); + Object versionStr = nodeData.get("index_version"); + if (versionStr == null) { + versionStr = nodeData.get("max_index_version"); + } // fallback on version if index version is not there IndexVersion indexVersion = versionStr != null - ? IndexVersion.fromId(Integer.parseInt(versionStr)) + ? 
IndexVersion.fromId(Integer.parseInt(versionStr.toString())) : IndexVersion.fromId( parseLegacyVersion((String) nodeData.get("version")).map(Version::id).orElse(IndexVersions.MINIMUM_COMPATIBLE.id()) ); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java index 66c24f157ddfe..9054dc6f94182 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java @@ -13,9 +13,6 @@ import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.Strings; import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.features.FeatureData; -import org.elasticsearch.features.FeatureSpecification; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.json.JsonXContent; @@ -25,13 +22,9 @@ import java.io.InputStream; import java.io.UncheckedIOException; import java.nio.file.Files; -import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; -import java.util.List; -import java.util.Map; import java.util.Set; import java.util.function.BiConsumer; import java.util.function.Predicate; @@ -48,34 +41,12 @@ class ESRestTestFeatureService implements TestFeatureService { */ private static final Pattern VERSION_FEATURE_PATTERN = Pattern.compile("gte_v(\\d+\\.\\d+\\.\\d+)"); - private final Set knownHistoricalFeatureNames; private final Collection nodeVersions; private final Collection> nodeFeatures; - private final Collection> nodeHistoricalFeatures; - ESRestTestFeatureService(List featureSpecs, Set nodeVersions, Collection> nodeFeatures) { - List specs = new ArrayList<>(featureSpecs); - specs.add(new RestTestLegacyFeatures()); - if (MetadataHolder.HISTORICAL_FEATURES != null) { - specs.add(MetadataHolder.HISTORICAL_FEATURES); - } - FeatureData featureData = FeatureData.createFromSpecifications(specs); - assert featureData.getNodeFeatures().isEmpty() - : Strings.format( - "Only historical features can be injected via ESRestTestCase#additionalTestOnlyHistoricalFeatures(), rejecting %s", - featureData.getNodeFeatures().keySet() - ); - this.knownHistoricalFeatureNames = featureData.getHistoricalFeatures().lastEntry().getValue(); + ESRestTestFeatureService(Set nodeVersions, Collection> nodeFeatures) { this.nodeVersions = nodeVersions; this.nodeFeatures = nodeFeatures; - this.nodeHistoricalFeatures = nodeVersions.stream() - .map(featureData.getHistoricalFeatures()::floorEntry) - .map(Map.Entry::getValue) - .toList(); - } - - public static boolean hasFeatureMetadata() { - return MetadataHolder.HISTORICAL_FEATURES != null; } private static boolean checkCollection(Collection coll, Predicate pred, boolean any) { @@ -84,11 +55,10 @@ private static boolean checkCollection(Collection coll, Predicate pred @Override public boolean clusterHasFeature(String featureId, boolean any) { - if (checkCollection(nodeFeatures, s -> s.contains(featureId), any) - || checkCollection(nodeHistoricalFeatures, s -> s.contains(featureId), any)) { + if (checkCollection(nodeFeatures, s -> s.contains(featureId), any)) { return true; } - if (MetadataHolder.FEATURE_NAMES.contains(featureId) || knownHistoricalFeatureNames.contains(featureId)) { + if 
(MetadataHolder.FEATURE_NAMES.contains(featureId)) { return false; // feature known but not present } @@ -132,24 +102,20 @@ public boolean clusterHasFeature(String featureId, boolean any) { return false; } + public static boolean hasFeatureMetadata() { + return MetadataHolder.FEATURE_NAMES.isEmpty() == false; + } + private static class MetadataHolder { - private static final FeatureSpecification HISTORICAL_FEATURES; private static final Set FEATURE_NAMES; static { String metadataPath = System.getProperty("tests.features.metadata.path"); if (metadataPath == null) { FEATURE_NAMES = emptySet(); - HISTORICAL_FEATURES = null; } else { Set featureNames = new HashSet<>(); - Map historicalFeatures = new HashMap<>(); loadFeatureMetadata(metadataPath, (key, value) -> { - if (key.equals("historical_features") && value instanceof Map map) { - for (var entry : map.entrySet()) { - historicalFeatures.put(new NodeFeature((String) entry.getKey()), Version.fromString((String) entry.getValue())); - } - } if (key.equals("feature_names") && value instanceof Collection collection) { for (var entry : collection) { featureNames.add((String) entry); @@ -157,13 +123,6 @@ private static class MetadataHolder { } }); FEATURE_NAMES = Collections.unmodifiableSet(featureNames); - Map unmodifiableHistoricalFeatures = Collections.unmodifiableMap(historicalFeatures); - HISTORICAL_FEATURES = new FeatureSpecification() { - @Override - public Map getHistoricalFeatures() { - return unmodifiableHistoricalFeatures; - } - }; } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java b/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java index 9ddcf39d24d98..0c466b9162eb8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java @@ -138,11 +138,6 @@ public HttpResponse createResponse(RestStatus status, ChunkedRestResponseBodyPar @Override public void release() {} - @Override - public HttpRequest releaseAndCopy() { - return this; - } - @Override public Exception getInboundException() { return inboundException; diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java deleted file mode 100644 index 194dfc057b84f..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
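The clusterHasFeature logic above funnels through the checkCollection(coll, pred, any) helper, whose body is elided in this hunk. A plausible sketch of its any/all semantics, assuming an empty node set should not report a feature as present:

    import java.util.Collection;
    import java.util.function.Predicate;

    // "any" semantics: at least one node satisfies the predicate;
    // otherwise every node must. The empty-input behaviour is an assumption.
    static <T> boolean checkCollection(Collection<T> coll, Predicate<T> pred, boolean any) {
        if (coll.isEmpty()) {
            return false;
        }
        return any ? coll.stream().anyMatch(pred) : coll.stream().allMatch(pred);
    }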
- */ - -package org.elasticsearch.test.rest; - -import org.elasticsearch.Version; -import org.elasticsearch.core.UpdateForV9; -import org.elasticsearch.features.FeatureSpecification; -import org.elasticsearch.features.NodeFeature; - -import java.util.Map; - -import static java.util.Map.entry; -import static org.elasticsearch.cluster.ClusterState.VERSION_INTRODUCING_TRANSPORT_VERSIONS; - -/** - * This class groups historical features that have been removed from the production codebase, but are still used by the test - * framework to support BwC tests. Rather than leaving them in the main src we group them here, so it's clear they are not used in - * production code anymore. - */ -public class RestTestLegacyFeatures implements FeatureSpecification { - public static final NodeFeature ML_STATE_RESET_FALLBACK_ON_DISABLED = new NodeFeature("ml.state_reset_fallback_on_disabled"); - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) - public static final NodeFeature COMPONENT_TEMPLATE_SUPPORTED = new NodeFeature("indices.component_template_supported"); - public static final NodeFeature ML_NEW_MEMORY_FORMAT = new NodeFeature("ml.new_memory_format"); - - /** These are "pure test" features: normally we would not need them, and test for TransportVersion/fallback to Version (see for example - * {@code ESRestTestCase#minimumTransportVersion()}. However, some tests explicitly check and validate the content of a response, so - * we need these features to support them. - */ - public static final NodeFeature TRANSPORT_VERSION_SUPPORTED = new NodeFeature("transport_version_supported"); - public static final NodeFeature STATE_REPLACED_TRANSPORT_VERSION_WITH_NODES_VERSION = new NodeFeature( - "state.transport_version_to_nodes_version" - ); - - // Ref: https://github.com/elastic/elasticsearch/pull/86416 - public static final NodeFeature ML_MEMORY_OVERHEAD_FIXED = new NodeFeature("ml.memory_overhead_fixed"); - - // QA - rolling upgrade tests - public static final NodeFeature DESIRED_NODE_API_SUPPORTED = new NodeFeature("desired_node_supported"); - public static final NodeFeature SECURITY_UPDATE_API_KEY = new NodeFeature("security.api_key_update"); - public static final NodeFeature SECURITY_BULK_UPDATE_API_KEY = new NodeFeature("security.api_key_bulk_update"); - - public static final NodeFeature TSDB_NEW_INDEX_FORMAT = new NodeFeature("indices.tsdb_new_format"); - public static final NodeFeature TSDB_GENERALLY_AVAILABLE = new NodeFeature("indices.tsdb_supported"); - - public static final NodeFeature TSDB_DOWNSAMPLING_STABLE = new NodeFeature("indices.tsdb_downsampling_stable"); - - /* - * A composable index template with no template defined in the body is mistakenly always assumed to not be a time series template. - * Fixed in #98840 - */ - public static final NodeFeature TSDB_EMPTY_TEMPLATE_FIXED = new NodeFeature("indices.tsdb_empty_composable_template_fixed"); - public static final NodeFeature SYNTHETIC_SOURCE_SUPPORTED = new NodeFeature("indices.synthetic_source"); - - public static final NodeFeature DESIRED_BALANCED_ALLOCATOR_SUPPORTED = new NodeFeature("allocator.desired_balance"); - - /* - * Cancel shard allocation command is broken for initial desired balance versions - * and might allocate shard on the node where it is not supposed to be. This - * is fixed by https://github.com/elastic/elasticsearch/pull/93635. 
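With RestTestLegacyFeatures deleted (continued below), version-gated BwC checks in REST tests fall back on the synthetic gte_vX.Y.Z feature ids matched by VERSION_FEATURE_PATTERN in the ESRestTestFeatureService hunk above. A minimal sketch of that naming convention (the comparison against actual node versions is elided here):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // "gte_v8.4.0" is read as "every node is on version >= 8.4.0".
    static final Pattern GTE_VERSION_FEATURE = Pattern.compile("gte_v(\\d+\\.\\d+\\.\\d+)");

    static String minimumVersionOf(String featureId) {
        Matcher matcher = GTE_VERSION_FEATURE.matcher(featureId);
        return matcher.matches() ? matcher.group(1) : null; // null: not a version feature
    }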
- */ - public static final NodeFeature DESIRED_BALANCED_ALLOCATOR_FIXED = new NodeFeature("allocator.desired_balance_fixed"); - public static final NodeFeature INDEXING_SLOWLOG_LEVEL_SETTING_REMOVED = new NodeFeature("settings.indexing_slowlog_level_removed"); - public static final NodeFeature DEPRECATION_WARNINGS_LEAK_FIXED = new NodeFeature("deprecation_warnings_leak_fixed"); - - // QA - Full cluster restart - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) - public static final NodeFeature REPLICATION_OF_CLOSED_INDICES = new NodeFeature("indices.closed_replication_supported"); - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) - public static final NodeFeature SOFT_DELETES_ENFORCED = new NodeFeature("indices.soft_deletes_enforced"); - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) - public static final NodeFeature NEW_TRANSPORT_COMPRESSED_SETTING = new NodeFeature("transport.new_compressed_setting"); - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) - public static final NodeFeature SERVICE_ACCOUNTS_SUPPORTED = new NodeFeature("auth.service_accounts_supported"); - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) - public static final NodeFeature TRANSFORM_SUPPORTED = new NodeFeature("transform.supported"); - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) - public static final NodeFeature SLM_SUPPORTED = new NodeFeature("slm.supported"); - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) - public static final NodeFeature DATA_STREAMS_SUPPORTED = new NodeFeature("data_stream.supported"); - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) - public static final NodeFeature NEW_DATA_STREAMS_INDEX_NAME_FORMAT = new NodeFeature("data_stream.new_index_name_format"); - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) - public static final NodeFeature DISABLE_FIELD_NAMES_FIELD_REMOVED = new NodeFeature("disable_of_field_names_field_removed"); - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) - public static final NodeFeature ML_NLP_SUPPORTED = new NodeFeature("ml.nlp_supported"); - - /* - * Starting with 8.11, cluster state has minimum system index mappings versions (#99307) and the system index mappings upgrade service - * started using them to determine when to update mappings for system indices. 
See https://github.com/elastic/elasticsearch/pull/99668 - */ - public static final NodeFeature MAPPINGS_UPGRADE_SERVICE_USES_MAPPINGS_VERSION = new NodeFeature( - "mappings.upgrade_service_uses_mappings_version" - ); - - // YAML - public static final NodeFeature REST_ELASTIC_PRODUCT_HEADER_PRESENT = new NodeFeature("action.rest.product_header_present"); - - @Override - public Map getHistoricalFeatures() { - return Map.ofEntries( - entry(COMPONENT_TEMPLATE_SUPPORTED, Version.V_7_8_0), - entry(ML_STATE_RESET_FALLBACK_ON_DISABLED, Version.V_8_7_0), - entry(SECURITY_UPDATE_API_KEY, Version.V_8_4_0), - entry(SECURITY_BULK_UPDATE_API_KEY, Version.V_8_5_0), - entry(ML_NEW_MEMORY_FORMAT, Version.V_8_11_0), - entry(TRANSPORT_VERSION_SUPPORTED, VERSION_INTRODUCING_TRANSPORT_VERSIONS), - entry(STATE_REPLACED_TRANSPORT_VERSION_WITH_NODES_VERSION, Version.V_8_11_0), - entry(ML_MEMORY_OVERHEAD_FIXED, Version.V_8_2_1), - entry(REST_ELASTIC_PRODUCT_HEADER_PRESENT, Version.V_8_0_1), - entry(DESIRED_NODE_API_SUPPORTED, Version.V_8_1_0), - entry(TSDB_NEW_INDEX_FORMAT, Version.V_8_2_0), - entry(SYNTHETIC_SOURCE_SUPPORTED, Version.V_8_4_0), - entry(DESIRED_BALANCED_ALLOCATOR_SUPPORTED, Version.V_8_6_0), - entry(DESIRED_BALANCED_ALLOCATOR_FIXED, Version.V_8_7_1), - entry(TSDB_GENERALLY_AVAILABLE, Version.V_8_7_0), - entry(TSDB_DOWNSAMPLING_STABLE, Version.V_8_10_0), - entry(TSDB_EMPTY_TEMPLATE_FIXED, Version.V_8_11_0), - entry(INDEXING_SLOWLOG_LEVEL_SETTING_REMOVED, Version.V_8_0_0), - entry(DEPRECATION_WARNINGS_LEAK_FIXED, Version.V_7_17_9), - entry(REPLICATION_OF_CLOSED_INDICES, Version.V_7_2_0), - entry(SOFT_DELETES_ENFORCED, Version.V_8_0_0), - entry(NEW_TRANSPORT_COMPRESSED_SETTING, Version.V_7_14_0), - entry(SERVICE_ACCOUNTS_SUPPORTED, Version.V_7_13_0), - entry(TRANSFORM_SUPPORTED, Version.V_7_2_0), - entry(SLM_SUPPORTED, Version.V_7_4_0), - entry(DATA_STREAMS_SUPPORTED, Version.V_7_9_0), - entry(NEW_DATA_STREAMS_INDEX_NAME_FORMAT, Version.V_7_11_0), - entry(DISABLE_FIELD_NAMES_FIELD_REMOVED, Version.V_8_0_0), - entry(ML_NLP_SUPPORTED, Version.V_8_0_0), - entry(MAPPINGS_UPGRADE_SERVICE_USES_MAPPINGS_VERSION, Version.V_8_11_0) - ); - } -} diff --git a/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json index e69c2db6ff400..58e0ea5fd9fc5 100644 --- a/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json +++ b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json @@ -21,7 +21,52 @@ "dict_dec":{ "type":"dictionary_decompounder", "word_list":["donau", "dampf", "schiff", "spargel", "creme", "suppe"] - } + }, + "hyphenation_dec_only_longest_match": { + "type": "hyphenation_decompounder", + "hyphenation_patterns_path": "de_DR.xml", + "word_list": [ + "fuss", + "fussball", + "ballpumpe", + "ball", + "pumpe", + "kaffee", + "fee", + "maschine" + ], + "only_longest_match": true + }, + "hyphenation_dec_no_sub_matches": { + "type": "hyphenation_decompounder", + "hyphenation_patterns_path": "de_DR.xml", + "word_list": [ + "fuss", + "fussball", + "ballpumpe", + "ball", + "pumpe", + "kaffee", + "fee", + "maschine" + ], + "no_sub_matches": true + }, + "hyphenation_dec_no_overlapping_matches": { + "type": "hyphenation_decompounder", + "hyphenation_patterns_path": "de_DR.xml", + "word_list": [ + "fuss", + "fussball", + "ballpumpe", + "ball", + "pumpe", + "kaffee", + "fee", + "maschine" + ], + "no_overlapping_matches": true + } }, "analyzer":{ "standard":{ @@ -47,6 
+92,18 @@ "decompoundingAnalyzer":{ "tokenizer":"standard", "filter":["dict_dec"] + }, + "hyphenationDecompoundingAnalyzerOnlyLongestMatch":{ + "tokenizer":"standard", + "filter":["hyphenation_dec_only_longest_match"] + }, + "hyphenationDecompoundingAnalyzerNoSubMatches": { + "tokenizer":"standard", + "filter":["hyphenation_dec_no_sub_matches"] + }, + "hyphenationDecompoundingAnalyzerNoOverlappingMatches":{ + "tokenizer":"standard", + "filter":["hyphenation_dec_no_overlapping_matches"] } } } diff --git a/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml index 82f933296a314..095b27e0fa071 100644 --- a/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml +++ b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml @@ -15,6 +15,21 @@ index : dict_dec : type : dictionary_decompounder word_list : [donau, dampf, schiff, spargel, creme, suppe] + hyphenation_dec_only_longest_match : + type : hyphenation_decompounder + hyphenation_patterns_path : de_DR.xml + word_list : [fuss, fussball, ballpumpe, ball, pumpe, kaffee, fee, maschine] + only_longest_match : true + hyphenation_dec_no_sub_matches : + type : hyphenation_decompounder + hyphenation_patterns_path : de_DR.xml + word_list : [fuss, fussball, ballpumpe, ball, pumpe, kaffee, fee, maschine] + no_sub_matches : true + hyphenation_dec_no_overlapping_matches : + type : hyphenation_decompounder + hyphenation_patterns_path : de_DR.xml + word_list : [fuss, fussball, ballpumpe, ball, pumpe, kaffee, fee, maschine] + no_overlapping_matches: true analyzer : standard : type : standard @@ -37,3 +52,13 @@ index : decompoundingAnalyzer : tokenizer : standard filter : [dict_dec] + hyphenationDecompoundingAnalyzerOnlyLongestMatch : + tokenizer : standard + filter : [hyphenation_dec_only_longest_match] + hyphenationDecompoundingAnalyzerNoSubMatches: + tokenizer: standard + filter : [hyphenation_dec_no_sub_matches] + hyphenationDecompoundingAnalyzerNoOverlappingMatches: + tokenizer: standard + filter : [hyphenation_dec_no_overlapping_matches] + diff --git a/test/framework/src/test/java/org/elasticsearch/http/HttpHeaderParserTests.java b/test/framework/src/test/java/org/elasticsearch/http/HttpHeaderParserTests.java new file mode 100644 index 0000000000000..e025e7770ea4c --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/http/HttpHeaderParserTests.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.http; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.fixture.HttpHeaderParser; + +import java.math.BigInteger; + +public class HttpHeaderParserTests extends ESTestCase { + + public void testParseRangeHeader() { + final long start = randomLongBetween(0, 10_000); + final long end = randomLongBetween(start, start + 10_000); + assertEquals(new HttpHeaderParser.Range(start, end), HttpHeaderParser.parseRangeHeader("bytes=" + start + "-" + end)); + } + + public void testParseRangeHeaderInvalidLong() { + final BigInteger longOverflow = BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE).add(randomBigInteger()); + assertNull(HttpHeaderParser.parseRangeHeader("bytes=123-" + longOverflow)); + assertNull(HttpHeaderParser.parseRangeHeader("bytes=" + longOverflow + "-123")); + } + + public void testParseRangeHeaderMultipleRangesNotMatched() { + assertNull( + HttpHeaderParser.parseRangeHeader( + Strings.format( + "bytes=%d-%d,%d-%d", + randomIntBetween(0, 99), + randomIntBetween(100, 199), + randomIntBetween(200, 299), + randomIntBetween(300, 399) + ) + ) + ); + } + + public void testParseRangeHeaderEndlessRangeNotMatched() { + assertNull(HttpHeaderParser.parseRangeHeader(Strings.format("bytes=%d-", randomLongBetween(0, Long.MAX_VALUE)))); + } + + public void testParseRangeHeaderSuffixLengthNotMatched() { + assertNull(HttpHeaderParser.parseRangeHeader(Strings.format("bytes=-%d", randomLongBetween(0, Long.MAX_VALUE)))); + } +} diff --git a/test/immutable-collections-patch/build.gradle b/test/immutable-collections-patch/build.gradle index 28aad96754629..85a199af2d477 100644 --- a/test/immutable-collections-patch/build.gradle +++ b/test/immutable-collections-patch/build.gradle @@ -9,7 +9,6 @@ import org.elasticsearch.gradle.OS import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.java' @@ -26,14 +25,14 @@ def outputDir = layout.buildDirectory.dir("jdk-patches") def generatePatch = tasks.register("generatePatch", JavaExec) generatePatch.configure { dependsOn tasks.named("compileJava") - inputs.property("java-home-set", BuildParams.getIsRuntimeJavaHomeSet()) - inputs.property("java-version", BuildParams.runtimeJavaVersion) + inputs.property("java-home-set", buildParams.getIsRuntimeJavaHomeSet()) + inputs.property("java-version", buildParams.runtimeJavaVersion) outputs.dir(outputDir) classpath = sourceSets.main.runtimeClasspath mainClass = 'org.elasticsearch.jdk.patch.ImmutableCollectionsPatcher' - if (BuildParams.getIsRuntimeJavaHomeSet()) { - executable = "${BuildParams.runtimeJavaHome}/bin/java" + (OS.current() == OS.WINDOWS ? '.exe' : '') + if (buildParams.getIsRuntimeJavaHomeSet()) { + executable = "${buildParams.runtimeJavaHome.get()}/bin/java" + (OS.current() == OS.WINDOWS ? 
'.exe' : '') } else { javaLauncher = javaToolchains.launcherFor { languageVersion = JavaLanguageVersion.of(VersionProperties.bundledJdkMajorVersion) diff --git a/test/metadata-extractor/src/main/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractor.java b/test/metadata-extractor/src/main/java/org/elasticsearch/extractor/features/ClusterFeaturesMetadataExtractor.java similarity index 69% rename from test/metadata-extractor/src/main/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractor.java rename to test/metadata-extractor/src/main/java/org/elasticsearch/extractor/features/ClusterFeaturesMetadataExtractor.java index 3ffa27126fac8..3a090a1b3fadc 100644 --- a/test/metadata-extractor/src/main/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractor.java +++ b/test/metadata-extractor/src/main/java/org/elasticsearch/extractor/features/ClusterFeaturesMetadataExtractor.java @@ -9,9 +9,8 @@ package org.elasticsearch.extractor.features; -import org.elasticsearch.Version; -import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.logging.LogConfigurator; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.XContentGenerator; @@ -25,14 +24,12 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.StandardOpenOption; -import java.util.HashMap; import java.util.HashSet; -import java.util.Map; import java.util.ServiceLoader; import java.util.Set; import java.util.stream.Stream; -public class HistoricalFeaturesMetadataExtractor { +public class ClusterFeaturesMetadataExtractor { private final ClassLoader classLoader; static { @@ -40,7 +37,7 @@ public class HistoricalFeaturesMetadataExtractor { LogConfigurator.configureESLogging(); } - public HistoricalFeaturesMetadataExtractor(ClassLoader classLoader) { + public ClusterFeaturesMetadataExtractor(ClassLoader classLoader) { this.classLoader = classLoader; } @@ -56,9 +53,7 @@ public static void main(String[] args) { printUsageAndExit(); } - new HistoricalFeaturesMetadataExtractor(HistoricalFeaturesMetadataExtractor.class.getClassLoader()).generateMetadataFile( - outputFile - ); + new ClusterFeaturesMetadataExtractor(ClusterFeaturesMetadataExtractor.class.getClassLoader()).generateMetadataFile(outputFile); } public void generateMetadataFile(Path outputFile) { @@ -67,13 +62,7 @@ public void generateMetadataFile(Path outputFile) { XContentGenerator generator = JsonXContent.jsonXContent.createGenerator(os) ) { generator.writeStartObject(); - extractHistoricalFeatureMetadata((historical, names) -> { - generator.writeFieldName("historical_features"); - generator.writeStartObject(); - for (Map.Entry<NodeFeature, Version> entry : historical.entrySet()) { - generator.writeStringField(entry.getKey().id(), entry.getValue().toString()); - } - generator.writeEndObject(); + extractClusterFeaturesMetadata(names -> { generator.writeFieldName("feature_names"); generator.writeStartArray(); for (var entry : names) { @@ -87,22 +76,19 @@ public void generateMetadataFile(Path outputFile) { } } - void extractHistoricalFeatureMetadata(CheckedBiConsumer<Map<NodeFeature, Version>, Set<String>, IOException> metadataConsumer) - throws IOException { - Map<NodeFeature, Version> historicalFeatures = new HashMap<>(); + void extractClusterFeaturesMetadata(CheckedConsumer<Set<String>, IOException> metadataConsumer) throws IOException { Set<String> featureNames = new HashSet<>(); ServiceLoader<FeatureSpecification> featureSpecLoader = ServiceLoader.load(FeatureSpecification.class, classLoader); for (FeatureSpecification featureSpecification : featureSpecLoader) { - historicalFeatures.putAll(featureSpecification.getHistoricalFeatures()); Stream.concat(featureSpecification.getFeatures().stream(), featureSpecification.getTestFeatures().stream()) .map(NodeFeature::id) .forEach(featureNames::add); } - metadataConsumer.accept(historicalFeatures, featureNames); + metadataConsumer.accept(featureNames); } private static void printUsageAndExit() { - System.err.println("Usage: HistoricalFeaturesMetadataExtractor <output file>"); + System.err.println("Usage: ClusterFeaturesMetadataExtractor <output file>"); System.exit(1); } } diff --git a/test/metadata-extractor/src/test/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractorTests.java b/test/metadata-extractor/src/test/java/org/elasticsearch/extractor/features/ClusterFeaturesMetadataExtractorTests.java similarity index 66% rename from test/metadata-extractor/src/test/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractorTests.java rename to test/metadata-extractor/src/test/java/org/elasticsearch/extractor/features/ClusterFeaturesMetadataExtractorTests.java index e230982073699..af69aaff86cc5 100644 --- a/test/metadata-extractor/src/test/java/org/elasticsearch/extractor/features/HistoricalFeaturesMetadataExtractorTests.java +++ b/test/metadata-extractor/src/test/java/org/elasticsearch/extractor/features/ClusterFeaturesMetadataExtractorTests.java @@ -9,8 +9,6 @@ package org.elasticsearch.extractor.features; -import org.elasticsearch.Version; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.json.JsonXContent; @@ -21,34 +19,26 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.Collection; -import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import static org.elasticsearch.xcontent.XContentParserConfiguration.EMPTY; -import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.not; -public class HistoricalFeaturesMetadataExtractorTests extends ESTestCase { +public class ClusterFeaturesMetadataExtractorTests extends ESTestCase { @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); public void testExtractHistoricalMetadata() throws IOException { - HistoricalFeaturesMetadataExtractor extractor = new HistoricalFeaturesMetadataExtractor(this.getClass().getClassLoader()); - Map<NodeFeature, Version> nodeFeatureVersionMap = new HashMap<>(); + ClusterFeaturesMetadataExtractor extractor = new ClusterFeaturesMetadataExtractor(this.getClass().getClassLoader()); Set<String> featureNamesSet = new HashSet<>(); - extractor.extractHistoricalFeatureMetadata((historical, names) -> { - nodeFeatureVersionMap.putAll(historical); - featureNamesSet.addAll(names); - }); - assertThat(nodeFeatureVersionMap, not(anEmptyMap())); + extractor.extractClusterFeaturesMetadata(featureNamesSet::addAll); assertThat(featureNamesSet, not(empty())); assertThat(featureNamesSet, hasItem("test_features_enabled")); @@ -56,11 +46,7 @@ public void testExtractHistoricalMetadata() throws IOException { extractor.generateMetadataFile(outputFile); try (XContentParser parser =
JsonXContent.jsonXContent.createParser(EMPTY, Files.newInputStream(outputFile))) { Map<String, Object> parsedMap = parser.map(); - assertThat(parsedMap, hasKey("historical_features")); assertThat(parsedMap, hasKey("feature_names")); - @SuppressWarnings("unchecked") - Map<String, String> historicalFeaturesMap = (Map<String, String>) (parsedMap.get("historical_features")); - nodeFeatureVersionMap.forEach((key, value) -> assertThat(historicalFeaturesMap, hasEntry(key.id(), value.toString()))); @SuppressWarnings("unchecked") Collection<String> featureNamesList = (Collection<String>) (parsedMap.get("feature_names")); diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java index f996db92e57f4..11787866af0d7 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java @@ -18,9 +18,7 @@ public enum FeatureFlag { TIME_SERIES_MODE("es.index_mode_feature_flag_registered=true", Version.fromString("8.0.0"), null), FAILURE_STORE_ENABLED("es.failure_store_feature_flag_enabled=true", Version.fromString("8.12.0"), null), - SUB_OBJECTS_AUTO_ENABLED("es.sub_objects_auto_feature_flag_enabled=true", Version.fromString("8.16.0"), null), - INFERENCE_DEFAULT_ELSER("es.inference_default_elser_feature_flag_enabled=true", Version.fromString("8.16.0"), null), - ML_SCALE_FROM_ZERO("es.ml_scale_from_zero_feature_flag_enabled=true", Version.fromString("8.16.0"), null); + SUB_OBJECTS_AUTO_ENABLED("es.sub_objects_auto_feature_flag_enabled=true", Version.fromString("8.16.0"), null); public final String systemProperty; public final Version from; diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java index ec1bf13bd993b..2dac2ee232aa5 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java @@ -49,6 +49,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Properties; import java.util.Set; import java.util.UUID; @@ -59,6 +60,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static java.util.function.Predicate.not; import static org.elasticsearch.test.cluster.local.distribution.DistributionType.DEFAULT; import static org.elasticsearch.test.cluster.util.OS.WINDOWS; @@ -472,6 +474,7 @@ private void createKeystore() { private void addKeystoreSettings() { spec.resolveKeystore().forEach((key, value) -> { + Objects.requireNonNull(value, "keystore setting for '" + key + "' may not be null"); String input = spec.getKeystorePassword() == null || spec.getKeystorePassword().isEmpty() ?
value : spec.getKeystorePassword() + "\n" + value; @@ -755,18 +758,16 @@ private Map<String, String> getEnvironmentVariables() { } String heapSize = System.getProperty("tests.heap.size", "512m"); - final String esJavaOpts = Stream.of( - "-Xms" + heapSize, - "-Xmx" + heapSize, - "-ea", - "-esa", - System.getProperty("tests.jvm.argline", ""), - featureFlagProperties, - systemProperties, - jvmArgs, - debugArgs - ).filter(s -> s.isEmpty() == false).collect(Collectors.joining(" ")); + List<String> serverOpts = List.of("-Xms" + heapSize, "-Xmx" + heapSize, debugArgs, featureFlagProperties); + List<String> commonOpts = List.of("-ea", "-esa", System.getProperty("tests.jvm.argline", ""), systemProperties, jvmArgs); + + String esJavaOpts = Stream.concat(serverOpts.stream(), commonOpts.stream()) + .filter(not(String::isEmpty)) + .collect(Collectors.joining(" ")); + String cliJavaOpts = commonOpts.stream().filter(not(String::isEmpty)).collect(Collectors.joining(" ")); + environment.put("ES_JAVA_OPTS", esJavaOpts); + environment.put("CLI_JAVA_OPTS", cliJavaOpts); return environment; } diff --git a/test/test-clusters/src/main/resources/fips/fips_java.policy b/test/test-clusters/src/main/resources/fips/fips_java.policy index c259b0bc908d8..781e1247db7a5 100644 --- a/test/test-clusters/src/main/resources/fips/fips_java.policy +++ b/test/test-clusters/src/main/resources/fips/fips_java.policy @@ -5,6 +5,7 @@ grant { permission java.security.SecurityPermission "getProperty.jdk.tls.disabledAlgorithms"; permission java.security.SecurityPermission "getProperty.jdk.certpath.disabledAlgorithms"; permission java.security.SecurityPermission "getProperty.jdk.tls.server.defaultDHEParameters"; + permission java.security.SecurityPermission "getProperty.org.bouncycastle.ec.max_f2m_field_size"; permission java.lang.RuntimePermission "getProtectionDomain"; permission java.util.PropertyPermission "java.runtime.name", "read"; permission org.bouncycastle.crypto.CryptoServicesPermission "tlsAlgorithmsEnabled"; @@ -20,6 +21,6 @@ grant { }; // rely on the caller's socket permissions, the JSSE TLS implementation here is always allowed to connect -grant codeBase "file:${jdk.module.path}/bctls-fips-1.0.17.jar" { +grant codeBase "file:${jdk.module.path}/bctls-fips-1.0.19.jar" { permission java.net.SocketPermission "*", "connect"; }; diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 86c3f42a6a8ec..627554f6b261d 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -20,8 +20,8 @@ import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.core.Tuple; import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; import org.elasticsearch.test.rest.yaml.ClientYamlTestResponseException; @@ -371,13 +371,7 @@ public void execute(ClientYamlTestExecutionContext executionContext) throws IOEx ?
executionContext.getClientYamlTestCandidate().getTestPath() : null; - var fixedProductionHeader = executionContext.clusterHasFeature( - RestTestLegacyFeatures.REST_ELASTIC_PRODUCT_HEADER_PRESENT.id(), - false - ); - if (fixedProductionHeader) { - checkElasticProductHeader(response.getHeaders("X-elastic-product")); - } + checkElasticProductHeader(response.getHeaders("X-elastic-product")); checkWarningHeaders(response.getWarningHeaders(), testPath); } catch (ClientYamlTestResponseException e) { checkResponseException(e, executionContext); @@ -502,6 +496,8 @@ public void checkWarningHeaders(final List<String> warningHeaders, String testPa } } + unexpected.removeIf(s -> s.endsWith(SourceFieldMapper.DEPRECATION_WARNING + "\"")); + if (unexpected.isEmpty() == false || unmatched.isEmpty() == false || missing.isEmpty() == false diff --git a/x-pack/libs/es-opensaml-security-api/build.gradle b/x-pack/libs/es-opensaml-security-api/build.gradle index b36d0bfa7b37d..3b4434ec5d9e5 100644 --- a/x-pack/libs/es-opensaml-security-api/build.gradle +++ b/x-pack/libs/es-opensaml-security-api/build.gradle @@ -7,7 +7,7 @@ apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.publish' -apply plugin: 'com.github.johnrengelman.shadow' +apply plugin: 'com.gradleup.shadow' dependencies { implementation "org.opensaml:opensaml-security-api:${versions.opensaml}" diff --git a/x-pack/plugin/analytics/build.gradle b/x-pack/plugin/analytics/build.gradle index c451df58b9fab..00f28b4badc3d 100644 --- a/x-pack/plugin/analytics/build.gradle +++ b/x-pack/plugin/analytics/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ apply plugin: 'elasticsearch.internal-es-plugin' apply plugin: 'elasticsearch.internal-cluster-test' @@ -21,7 +26,7 @@ dependencies { testImplementation(testArtifact(project(xpackModule('core')))) } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java index 0f732d2017c74..0d42a2856a10e 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java @@ -42,8 +42,7 @@ public static class Bucket extends AbstractInternalTerms.AbstractTermsBucket<Bucket> implements KeyComparable<Bucket> { long bucketOrd; protected long docCount; protected InternalAggregations aggregations; - private final boolean showDocCountError; - private final long docCountError; + private long docCountError; protected final List<DocValueFormat> formats; protected List<Object> terms; protected List<KeyConverter> keyConverters; @@ -60,8 +59,7 @@ public Bucket( this.terms = terms; this.docCount = docCount; this.aggregations = aggregations; - this.showDocCountError = showDocCountError; - this.docCountError = docCountError; + this.docCountError = showDocCountError ?
docCountError : -1; this.formats = formats; this.keyConverters = keyConverters; } @@ -71,7 +69,6 @@ protected Bucket(StreamInput in, List formats, List formats, List getBuckets() { @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - return doXContentCommon(builder, params, docCountError, otherDocCount, buckets); + return doXContentCommon(builder, params, showTermDocCountError, docCountError, otherDocCount, buckets); } @Override diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java index 85882a5c56851..1691aedf543f4 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java @@ -20,6 +20,8 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.common.util.ObjectArrayPriorityQueue; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Releasables; @@ -235,57 +237,62 @@ protected void doClose() { } @Override - public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { - InternalMultiTerms.Bucket[][] topBucketsPerOrd = new InternalMultiTerms.Bucket[owningBucketOrds.length][]; - long[] otherDocCounts = new long[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]); - - int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize()); - try ( - ObjectArrayPriorityQueue ordered = new BucketPriorityQueue<>( - size, - bigArrays(), - partiallyBuiltBucketComparator - ) - ) { - InternalMultiTerms.Bucket spare = null; - BytesRef spareKey = null; - BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); - while (ordsEnum.next()) { - long docCount = bucketDocCount(ordsEnum.ord()); - otherDocCounts[ordIdx] += docCount; - if (docCount < bucketCountThresholds.getShardMinDocCount()) { - continue; - } - if (spare == null) { - spare = new InternalMultiTerms.Bucket(null, 0, null, showTermDocCountError, 0, formats, keyConverters); - spareKey = new BytesRef(); + public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { + try ( + LongArray otherDocCounts = bigArrays().newLongArray(owningBucketOrds.size(), true); + ObjectArray topBucketsPerOrd = bigArrays().newObjectArray(owningBucketOrds.size()) + ) { + for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) { + final long owningBucketOrd = owningBucketOrds.get(ordIdx); + long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrd); + + int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize()); + try ( + ObjectArrayPriorityQueue ordered = new BucketPriorityQueue<>( + size, + bigArrays(), + partiallyBuiltBucketComparator + ) + ) { + InternalMultiTerms.Bucket spare = null; + BytesRef spareKey = null; + BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd); + while (ordsEnum.next()) { + long docCount = 
bucketDocCount(ordsEnum.ord()); + otherDocCounts.increment(ordIdx, docCount); + if (docCount < bucketCountThresholds.getShardMinDocCount()) { + continue; + } + if (spare == null) { + checkRealMemoryCBForInternalBucket(); + spare = new InternalMultiTerms.Bucket(null, 0, null, showTermDocCountError, 0, formats, keyConverters); + spareKey = new BytesRef(); + } + ordsEnum.readValue(spareKey); + spare.terms = unpackTerms(spareKey); + spare.docCount = docCount; + spare.bucketOrd = ordsEnum.ord(); + spare = ordered.insertWithOverflow(spare); } - ordsEnum.readValue(spareKey); - spare.terms = unpackTerms(spareKey); - spare.docCount = docCount; - spare.bucketOrd = ordsEnum.ord(); - spare = ordered.insertWithOverflow(spare); - } - // Get the top buckets - InternalMultiTerms.Bucket[] bucketsForOrd = new InternalMultiTerms.Bucket[(int) ordered.size()]; - topBucketsPerOrd[ordIdx] = bucketsForOrd; - for (int b = (int) ordered.size() - 1; b >= 0; --b) { - topBucketsPerOrd[ordIdx][b] = ordered.pop(); - otherDocCounts[ordIdx] -= topBucketsPerOrd[ordIdx][b].getDocCount(); + // Get the top buckets + InternalMultiTerms.Bucket[] bucketsForOrd = new InternalMultiTerms.Bucket[(int) ordered.size()]; + topBucketsPerOrd.set(ordIdx, bucketsForOrd); + for (int b = (int) ordered.size() - 1; b >= 0; --b) { + InternalMultiTerms.Bucket[] buckets = topBucketsPerOrd.get(ordIdx); + buckets[b] = ordered.pop(); + otherDocCounts.increment(ordIdx, -buckets[b].getDocCount()); + } } } - } - buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, a) -> b.aggregations = a); + buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, a) -> b.aggregations = a); - InternalAggregation[] result = new InternalAggregation[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { - result[ordIdx] = buildResult(otherDocCounts[ordIdx], topBucketsPerOrd[ordIdx]); + return buildAggregations( + Math.toIntExact(owningBucketOrds.size()), + ordIdx -> buildResult(otherDocCounts.get(ordIdx), topBucketsPerOrd.get(ordIdx)) + ); } - return result; } InternalMultiTerms buildResult(long otherDocCount, InternalMultiTerms.Bucket[] topBuckets) { @@ -305,7 +312,7 @@ InternalMultiTerms buildResult(long otherDocCount, InternalMultiTerms.Bucket[] t bucketCountThresholds.getShardSize(), showTermDocCountError, otherDocCount, - List.of(topBuckets), + Arrays.asList(topBuckets), 0, formats, keyConverters, diff --git a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java index 466c9e4f006dc..228ac401b96bb 100644 --- a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java +++ b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java @@ -10,7 +10,6 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.ClientHelper; @@ -33,8 +32,7 @@ public APMIndexTemplateRegistry( ClusterService clusterService, ThreadPool threadPool, Client client, - NamedXContentRegistry xContentRegistry, - FeatureService featureService + NamedXContentRegistry xContentRegistry ) { super( nodeSettings, @@ -42,7 +40,6 @@ 
public APMIndexTemplateRegistry( threadPool, client, xContentRegistry, - featureService, templateFilter(isDataStreamsLifecycleOnlyMode(clusterService.getSettings())) ); } diff --git a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java index aefb45f6186c1..0be95c337838a 100644 --- a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java +++ b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java @@ -48,14 +48,7 @@ public Collection createComponents(PluginServices services) { Settings settings = services.environment().settings(); ClusterService clusterService = services.clusterService(); registry.set( - new APMIndexTemplateRegistry( - settings, - clusterService, - services.threadPool(), - services.client(), - services.xContentRegistry(), - services.featureService() - ) + new APMIndexTemplateRegistry(settings, clusterService, services.threadPool(), services.client(), services.xContentRegistry()) ); if (enabled) { APMIndexTemplateRegistry registryInstance = registry.get(); diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/apm@mappings.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/apm@mappings.yaml index ac6462c86676c..a5a3a7433f4c1 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/apm@mappings.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/apm@mappings.yaml @@ -4,6 +4,7 @@ _meta: managed: true template: mappings: + date_detection: false dynamic: true dynamic_templates: - numeric_labels: diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm@mappings.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm@mappings.yaml index af28cbb7415a0..660db3a6b0e2e 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm@mappings.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm@mappings.yaml @@ -5,8 +5,6 @@ _meta: managed: true template: mappings: - _source: - mode: synthetic properties: processor.event: type: constant_keyword diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm@settings.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm@settings.yaml index 819d5d7eafb8e..d8fc13bce79b1 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm@settings.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm@settings.yaml @@ -5,10 +5,12 @@ _meta: managed: true template: settings: - codec: best_compression - mapping: - # apm@settings sets `ignore_malformed: true`, but we need - # to disable this for metrics since they use synthetic source, - # and this combination is incompatible with the - # aggregate_metric_double field type. - ignore_malformed: false + index: + codec: best_compression + mapping: + # apm@settings sets `ignore_malformed: true`, but we need + # to disable this for metrics since they use synthetic source, + # and this combination is incompatible with the + # aggregate_metric_double field type. 
+ ignore_malformed: false + source.mode: synthetic diff --git a/x-pack/plugin/apm-data/src/main/resources/resources.yaml b/x-pack/plugin/apm-data/src/main/resources/resources.yaml index fa209cdec3695..9484f577583eb 100644 --- a/x-pack/plugin/apm-data/src/main/resources/resources.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/resources.yaml @@ -1,7 +1,7 @@ # "version" holds the version of the templates and ingest pipelines installed # by xpack-plugin apm-data. This must be increased whenever an existing template or # pipeline is changed, in order for it to be updated on Elasticsearch upgrade. -version: 11 +version: 12 component-templates: # Data lifecycle. diff --git a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMDSLOnlyTests.java b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMDSLOnlyTests.java index 476b504339e62..b18e95b55dde0 100644 --- a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMDSLOnlyTests.java +++ b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMDSLOnlyTests.java @@ -14,8 +14,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.datastreams.DataStreamFeatures; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -24,7 +22,6 @@ import org.junit.After; import org.junit.Before; -import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; @@ -56,15 +53,13 @@ public void createRegistryAndClient() { additionalSettings, clusterSettings ); - FeatureService featureService = new FeatureService(List.of(new DataStreamFeatures())); apmIndexTemplateRegistry = new APMIndexTemplateRegistry( Settings.EMPTY, clusterService, threadPool, client, - NamedXContentRegistry.EMPTY, - featureService + NamedXContentRegistry.EMPTY ); apmIndexTemplateRegistry.setEnabled(true); } diff --git a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java index 4a2b9265b3b05..32e7c2225e19d 100644 --- a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java +++ b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.apmdata; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -30,8 +29,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.datastreams.DataStreamFeatures; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.ingest.PipelineConfiguration; import org.elasticsearch.test.ClusterServiceUtils; @@ -92,9 +89,8 @@ public void createRegistryAndClient() { threadPool = new TestThreadPool(this.getClass().getName()); client = new VerifyingClient(threadPool); ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool, 
clusterSettings); - FeatureService featureService = new FeatureService(List.of(new DataStreamFeatures())); stackTemplateRegistryAccessor = new StackTemplateRegistryAccessor( - new StackTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client, NamedXContentRegistry.EMPTY, featureService) + new StackTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client, NamedXContentRegistry.EMPTY) ); apmIndexTemplateRegistry = new APMIndexTemplateRegistry( @@ -102,8 +98,7 @@ public void createRegistryAndClient() { clusterService, threadPool, client, - NamedXContentRegistry.EMPTY, - featureService + NamedXContentRegistry.EMPTY ); apmIndexTemplateRegistry.setEnabled(true); } @@ -408,25 +403,6 @@ public void testIndexTemplateConventions() throws Exception { } } - public void testThatNothingIsInstalledWhenAllNodesAreNotUpdated() { - DiscoveryNode updatedNode = DiscoveryNodeUtils.create("updatedNode"); - DiscoveryNode outdatedNode = DiscoveryNodeUtils.create("outdatedNode", ESTestCase.buildNewFakeTransportAddress(), Version.V_8_10_0); - DiscoveryNodes nodes = DiscoveryNodes.builder() - .localNodeId("updatedNode") - .masterNodeId("updatedNode") - .add(updatedNode) - .add(outdatedNode) - .build(); - - client.setVerifier((a, r, l) -> { - fail("if some cluster mode are not updated to at least v.8.11.0 nothing should happen"); - return null; - }); - - ClusterChangedEvent event = createClusterChangedEvent(Map.of(), Map.of(), nodes); - apmIndexTemplateRegistry.clusterChanged(event); - } - public void testILMComponentTemplatesInstalled() throws Exception { int ilmFallbackCount = 0; for (Map.Entry entry : apmIndexTemplateRegistry.getComponentTemplateConfigs().entrySet()) { diff --git a/x-pack/plugin/async-search/qa/rest/build.gradle b/x-pack/plugin/async-search/qa/rest/build.gradle index 4fc557a5b6048..eb758c2c0ef5e 100644 --- a/x-pack/plugin/async-search/qa/rest/build.gradle +++ b/x-pack/plugin/async-search/qa/rest/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ apply plugin: 'elasticsearch.base-internal-es-plugin' apply plugin: 'elasticsearch.internal-java-rest-test' @@ -28,5 +33,5 @@ testClusters.configureEach { // Test clusters run with security disabled tasks.named("yamlRestTest") { - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) } diff --git a/x-pack/plugin/autoscaling/qa/rest/build.gradle b/x-pack/plugin/autoscaling/qa/rest/build.gradle index c79644ee31225..903e76fd986cf 100644 --- a/x-pack/plugin/autoscaling/qa/rest/build.gradle +++ b/x-pack/plugin/autoscaling/qa/rest/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java index a253b6bdd2360..0fb4267745cb8 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java @@ -115,24 +115,16 @@ public LongHistogram getCacheMissLoadTimes() { * * @param bytesCopied The number of bytes copied * @param copyTimeNanos The time taken to copy the bytes in nanoseconds - * @param index The index being loaded - * @param shardId The ID of the shard being loaded * @param cachePopulationReason The reason for the cache being populated * @param cachePopulationSource The source from which the data is being loaded */ public void recordCachePopulationMetrics( int bytesCopied, long copyTimeNanos, - String index, - int shardId, CachePopulationReason cachePopulationReason, CachePopulationSource cachePopulationSource ) { Map<String, Object> metricAttributes = Map.of( - INDEX_ATTRIBUTE_KEY, - index, - SHARD_ID_ATTRIBUTE_KEY, - shardId, CACHE_POPULATION_REASON_ATTRIBUTE_KEY, cachePopulationReason.name(), CACHE_POPULATION_SOURCE_ATTRIBUTE_KEY, diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/BlobCacheMetricsTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/BlobCacheMetricsTests.java index ea9d0b7356f0e..435798ba93a8b 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/BlobCacheMetricsTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/BlobCacheMetricsTests.java @@ -30,15 +30,11 @@ public void createMetrics() { public void testRecordCachePopulationMetricsRecordsThroughput() { int mebiBytesSent = randomIntBetween(1, 4); int secondsTaken = randomIntBetween(1, 5); - String indexName = randomIdentifier(); - int shardId = randomIntBetween(0, 10); BlobCacheMetrics.CachePopulationReason cachePopulationReason = randomFrom(BlobCacheMetrics.CachePopulationReason.values()); CachePopulationSource cachePopulationSource = randomFrom(CachePopulationSource.values()); metrics.recordCachePopulationMetrics( Math.toIntExact(ByteSizeValue.ofMb(mebiBytesSent).getBytes()), TimeUnit.SECONDS.toNanos(secondsTaken), - indexName, - shardId, cachePopulationReason, cachePopulationSource ); @@ -48,32 +44,28 @@ public void testRecordCachePopulationMetricsRecordsThroughput() { .getMeasurements(InstrumentType.DOUBLE_HISTOGRAM, "es.blob_cache.population.throughput.histogram") .get(0); assertEquals(throughputMeasurement.getDouble(), (double) mebiBytesSent / secondsTaken, 0.0); - assertExpectedAttributesPresent(throughputMeasurement, shardId, indexName, cachePopulationReason, cachePopulationSource); + assertExpectedAttributesPresent(throughputMeasurement, cachePopulationReason, cachePopulationSource); // bytes counter Measurement totalBytesMeasurement = recordingMeterRegistry.getRecorder() .getMeasurements(InstrumentType.LONG_COUNTER, "es.blob_cache.population.bytes.total") .get(0); assertEquals(totalBytesMeasurement.getLong(), ByteSizeValue.ofMb(mebiBytesSent).getBytes()); - assertExpectedAttributesPresent(totalBytesMeasurement, shardId, indexName, cachePopulationReason, cachePopulationSource); + assertExpectedAttributesPresent(totalBytesMeasurement, cachePopulationReason,
cachePopulationSource); // time counter Measurement totalTimeMeasurement = recordingMeterRegistry.getRecorder() .getMeasurements(InstrumentType.LONG_COUNTER, "es.blob_cache.population.time.total") .get(0); assertEquals(totalTimeMeasurement.getLong(), TimeUnit.SECONDS.toMillis(secondsTaken)); - assertExpectedAttributesPresent(totalTimeMeasurement, shardId, indexName, cachePopulationReason, cachePopulationSource); + assertExpectedAttributesPresent(totalTimeMeasurement, cachePopulationReason, cachePopulationSource); } private static void assertExpectedAttributesPresent( Measurement measurement, - int shardId, - String indexName, BlobCacheMetrics.CachePopulationReason cachePopulationReason, CachePopulationSource cachePopulationSource ) { - assertEquals(measurement.attributes().get(BlobCacheMetrics.SHARD_ID_ATTRIBUTE_KEY), shardId); - assertEquals(measurement.attributes().get(BlobCacheMetrics.INDEX_ATTRIBUTE_KEY), indexName); assertEquals(measurement.attributes().get(BlobCacheMetrics.CACHE_POPULATION_REASON_ATTRIBUTE_KEY), cachePopulationReason.name()); assertEquals(measurement.attributes().get(BlobCacheMetrics.CACHE_POPULATION_SOURCE_ATTRIBUTE_KEY), cachePopulationSource.name()); } diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index 193a82436f26a..26040529b04df 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -1,6 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask import org.elasticsearch.gradle.util.GradleUtils @@ -37,7 +43,7 @@ artifacts { def restTestBlacklist = [] // TODO: fix this rest test to not depend on a hardcoded port! restTestBlacklist.addAll(['getting_started/10_monitor_cluster_health/*']) -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { // these tests attempt to install basic/internal licenses signed against the dev/public.key // Since there is no infrastructure in place (anytime soon) to generate licenses using the production // private key, these tests are blacklisted in non-snapshot test runs @@ -81,7 +87,6 @@ tasks.named("precommit").configure { } tasks.named("yamlRestCompatTestTransform").configure({ task -> - task.skipTest("security/10_forbidden/Test bulk response with invalid credentials", "warning does not exist for compatibility") task.skipTest("esql/60_usage/Basic ESQL usage output (telemetry)", "The telemetry output changed. We dropped a column. 
That's safe.") task.skipTest("inference/inference_crud/Test get all", "Assertions on number of inference models break due to default configs") task.skipTest("esql/60_usage/Basic ESQL usage output (telemetry) snapshot version", "The number of functions is constantly increasing") @@ -89,5 +94,6 @@ tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTest("esql/80_text/reverse text", "The output type changed from TEXT to KEYWORD.") task.skipTest("esql/80_text/values function", "The output type changed from TEXT to KEYWORD.") task.skipTest("privileges/11_builtin/Test get builtin privileges" ,"unnecessary to test compatibility") + task.skipTest("esql/61_enrich_ip/Invalid IP strings", "We switched from exceptions to null+warnings for ENRICH runtime errors") }) diff --git a/x-pack/plugin/ccr/qa/build.gradle b/x-pack/plugin/ccr/qa/build.gradle index 583ad5d8c3df3..d5bc38d2e8dd5 100644 --- a/x-pack/plugin/ccr/qa/build.gradle +++ b/x-pack/plugin/ccr/qa/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ apply plugin: 'elasticsearch.java' @@ -10,6 +15,6 @@ subprojects { tasks.withType(Test).configureEach { // These fail in CI but only when run as part of checkPart2 and not individually. // Tracked in : https://github.com/elastic/elasticsearch/issues/66661 - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) } } diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle index da39d221f92f1..86f974ed13359 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle +++ b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle @@ -1,5 +1,16 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + import org.elasticsearch.gradle.internal.test.RestIntegTestTask +import org.elasticsearch.gradle.testclusters.TestClusterValueSource +import org.elasticsearch.gradle.testclusters.TestClustersPlugin +import org.elasticsearch.gradle.testclusters.TestClustersRegistry +import org.elasticsearch.gradle.util.GradleUtils + import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE apply plugin: 'elasticsearch.internal-testclusters' @@ -11,6 +22,8 @@ dependencies { testImplementation project(':x-pack:plugin:ccr:qa') } +def clusterPath = getPath() + def leaderCluster = testClusters.register("leader-cluster") { testDistribution = 'DEFAULT' setting 'xpack.license.self_generated.type', 'trial' @@ -24,7 +37,19 @@ def followCluster = testClusters.register("follow-cluster") { setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' user username: 'admin', password: 'admin-password', role: 'superuser' - setting 'cluster.remote.leader_cluster.seeds', { "\"${leaderCluster.get().getAllTransportPortURI().join(",")}\"" }, IGNORE_VALUE + Provider serviceProvider = GradleUtils.getBuildService( + project.gradle.sharedServices, + TestClustersPlugin.REGISTRY_SERVICE_NAME + ) + def leaderInfo = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set("leader-cluster") + it.parameters.service = serviceProvider + } + def leaderUris = leaderInfo.map { it.getAllTransportPortURI() } + + setting 'cluster.remote.leader_cluster.seeds', + { "\"${leaderUris.get().join(",")}\"" }, IGNORE_VALUE } tasks.register("leader-cluster", RestIntegTestTask) { @@ -41,7 +66,7 @@ tasks.register("writeJavaPolicy") { policyFile.write( [ "grant {", - " permission java.io.FilePermission \"${-> testClusters."follow-cluster".getFirstNode().getServerLog()}\", \"read\";", + " permission java.io.FilePermission \"${-> followCluster.map { it.getFirstNode().getServerLog() }.get()}\", \"read\";", "};" ].join("\n") ) @@ -50,11 +75,28 @@ tasks.register("writeJavaPolicy") { tasks.register("follow-cluster", RestIntegTestTask) { dependsOn 'writeJavaPolicy', "leader-cluster" - useCluster leaderCluster - systemProperty 'tests.target_cluster', 'follow' - nonInputProperties.systemProperty 'java.security.policy', "file://${policyFile}" - nonInputProperties.systemProperty 'tests.leader_host', leaderCluster.map(c -> c.allHttpSocketURI.get(0)) - nonInputProperties.systemProperty 'log', followCluster.map(c -> c.getFirstNode().getServerLog()) + useCluster leaderCluster + systemProperty 'tests.target_cluster', 'follow' + nonInputProperties.systemProperty 'java.security.policy', "file://${policyFile}" + Provider serviceProvider = GradleUtils.getBuildService( + project.gradle.sharedServices, + TestClustersPlugin.REGISTRY_SERVICE_NAME + ) + def leaderInfo = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set("leader-cluster") + it.parameters.service = serviceProvider + } + def followInfo = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set("follow-cluster") + it.parameters.service = serviceProvider + } + def leaderUri = leaderInfo.map { it.getAllHttpSocketURI().get(0) } + def followerUri = followInfo.map { it.getAllHttpSocketURI().get(0) } + + nonInputProperties.systemProperty 'tests.leader_host', leaderUri + nonInputProperties.systemProperty 'log', followCluster.map(c -> 
c.getFirstNode().getServerLog()) } tasks.named("check").configure { dependsOn "follow-cluster" } diff --git a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle index 2475a56aa87aa..61678784e6b38 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle @@ -1,6 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.RestIntegTestTask +import org.elasticsearch.gradle.testclusters.TestClusterValueSource +import org.elasticsearch.gradle.testclusters.TestClustersPlugin +import org.elasticsearch.gradle.testclusters.TestClustersRegistry +import org.elasticsearch.gradle.util.GradleUtils + import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE apply plugin: 'elasticsearch.internal-testclusters' @@ -12,6 +23,7 @@ dependencies { testImplementation project(':x-pack:plugin:ccr:qa') } +def clusterPath = getPath() def leaderCluster = testClusters.register('leader-cluster') { testDistribution = 'DEFAULT' setting 'xpack.license.self_generated.type', 'trial' @@ -21,12 +33,23 @@ def leaderCluster = testClusters.register('leader-cluster') { } def middleCluster = testClusters.register('middle-cluster') { - testDistribution = 'DEFAULT' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.enabled', 'true' - user username: 'admin', password: 'admin-password', role: 'superuser' - setting 'cluster.remote.leader_cluster.seeds', - { "\"${leaderCluster.get().getAllTransportPortURI().join(",")}\"" }, IGNORE_VALUE + testDistribution = 'DEFAULT' + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + user username: 'admin', password: 'admin-password', role: 'superuser' + + Provider serviceProvider = GradleUtils.getBuildService( + project.gradle.sharedServices, + TestClustersPlugin.REGISTRY_SERVICE_NAME + ) + def leaderInfo = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set("leader-cluster") + it.parameters.service = serviceProvider + } + def leaderUris = leaderInfo.map { it.getAllTransportPortURI() } + setting 'cluster.remote.leader_cluster.seeds', + { "\"${leaderUris.get().join(",")}\"" }, IGNORE_VALUE } tasks.register("leader-cluster", RestIntegTestTask) { @@ -40,30 +63,74 @@ tasks.register("middle-cluster", RestIntegTestTask) { useCluster testClusters.named("leader-cluster") systemProperty 'tests.target_cluster', 'middle' systemProperty 'tests.leader_cluster_repository_path', "${buildDir}/cluster/shared/repo/leader-cluster" - nonInputProperties.systemProperty 'tests.leader_host',leaderCluster.map(c -> c.allHttpSocketURI.get(0)) -} + Provider serviceProvider = GradleUtils.getBuildService( + project.gradle.sharedServices, + TestClustersPlugin.REGISTRY_SERVICE_NAME + ) + + def leaderUri = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set("leader-cluster") + it.parameters.service = serviceProvider + }.map { it.allHttpSocketURI.get(0) } + nonInputProperties.systemProperty 'tests.leader_host', leaderUri +} 
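A note on the pattern above, which repeats throughout these CCR QA build scripts: calls such as leaderCluster.get().getAllTransportPortURI() resolved endpoints at configuration time, eagerly realizing the cluster and defeating Gradle's configuration cache. The replacement reads each endpoint through a TestClusterValueSource provider parameterized with the project path, the cluster name, and the shared TestClustersRegistry build service, so the lookup is deferred until a task actually runs. The sketch below shows the underlying Gradle ValueSource mechanism in Java; ClusterEndpointValueSource and its parameter names are illustrative stand-ins rather than the Elasticsearch build's real types, and a real implementation would consult the registry service instead of deriving a URI.

import org.gradle.api.provider.Property;
import org.gradle.api.provider.ValueSource;
import org.gradle.api.provider.ValueSourceParameters;

// Hypothetical ValueSource: resolves a cluster endpoint by name when the
// provider is queried (at task execution time), not while the build script
// is being configured.
public abstract class ClusterEndpointValueSource
    implements ValueSource<String, ClusterEndpointValueSource.Params> {

    public interface Params extends ValueSourceParameters {
        Property<String> getClusterName(); // which registered cluster to ask
    }

    @Override
    public String obtain() {
        // Stand-in logic; the real TestClusterValueSource would ask the
        // TestClustersRegistry build service handed in via its parameters.
        return "http://" + getParameters().getClusterName().get() + ":9200";
    }
}

// Typical wiring, analogous to the project.getProviders().of(...) calls in
// the diff (Java syntax):
//   Provider<String> uri = providers.of(ClusterEndpointValueSource.class,
//       spec -> spec.getParameters().getClusterName().set("leader-cluster"));
// Because the value is deferred, tasks can capture the provider safely under
// the configuration cache and only pay for the lookup when they execute.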
tasks.register('follow-cluster', RestIntegTestTask) { dependsOn "leader-cluster", "middle-cluster" - useCluster leaderCluster - useCluster middleCluster - systemProperty 'tests.target_cluster', 'follow' - systemProperty 'tests.leader_cluster_repository_path', "${buildDir}/cluster/shared/repo/leader-cluster" - nonInputProperties.systemProperty 'tests.leader_host', leaderCluster.map(c -> c.allHttpSocketURI.get(0)) - nonInputProperties.systemProperty 'tests.middle_host', middleCluster.map(c -> c.allHttpSocketURI.get(0)) + useCluster leaderCluster + useCluster middleCluster + systemProperty 'tests.target_cluster', 'follow' + systemProperty 'tests.leader_cluster_repository_path', "${buildDir}/cluster/shared/repo/leader-cluster" + + Provider serviceProvider = GradleUtils.getBuildService( + project.gradle.sharedServices, + TestClustersPlugin.REGISTRY_SERVICE_NAME + ) + + def leaderUri = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set("leader-cluster") + it.parameters.service = serviceProvider + }.map { it.allHttpSocketURI.get(0) } + + def middleUri = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set("middle-cluster") + it.parameters.service = serviceProvider + }.map { it.allHttpSocketURI.get(0) } + nonInputProperties.systemProperty 'tests.leader_host', leaderUri + nonInputProperties.systemProperty 'tests.middle_host', middleUri } -testClusters.matching {it.name == "follow-cluster" }.configureEach { +testClusters.matching { it.name == "follow-cluster" }.configureEach { testDistribution = 'DEFAULT' setting 'xpack.monitoring.collection.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' user username: 'admin', password: 'admin-password', role: 'superuser' + + Provider serviceProvider = GradleUtils.getBuildService( + project.gradle.sharedServices, + TestClustersPlugin.REGISTRY_SERVICE_NAME + ) + def leaderUris = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set("leader-cluster") + it.parameters.service = serviceProvider + }.map { it.getAllTransportPortURI() } + + def middleUris = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set("middle-cluster") + it.parameters.service = serviceProvider + }.map { it.getAllTransportPortURI() } + setting 'cluster.remote.leader_cluster.seeds', - { "\"${leaderCluster.get().getAllTransportPortURI().join(",")}\"" }, IGNORE_VALUE + { "\"${leaderUris.get().join(",")}\"" }, IGNORE_VALUE setting 'cluster.remote.middle_cluster.seeds', - { "\"${middleCluster.get().getAllTransportPortURI().join(",")}\"" }, IGNORE_VALUE + { "\"${middleUris.get().join(",")}\"" }, IGNORE_VALUE } diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index 53e068ae6126e..0bb4afe51b85a 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DateFieldMapper; +import 
org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.RestStatus; @@ -366,8 +367,10 @@ public void testSyntheticSource() throws Exception { final String leaderIndexName = "synthetic_leader"; if ("leader".equals(targetCluster)) { logger.info("Running against leader cluster"); - createIndex(adminClient(), leaderIndexName, Settings.EMPTY, """ - "_source": {"mode": "synthetic"}, + Settings settings = Settings.builder() + .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC) + .build(); + createIndex(adminClient(), leaderIndexName, settings, """ "properties": {"kwd": {"type": "keyword"}}}""", null); for (int i = 0; i < numDocs; i++) { logger.info("Indexing doc [{}]", i); @@ -392,7 +395,6 @@ public void testSyntheticSource() throws Exception { } assertBusy(() -> { verifyDocuments(client(), followIndexName, numDocs); - assertMap(getIndexMappingAsMap(followIndexName), matchesMap().extraOk().entry("_source", Map.of("mode", "synthetic"))); if (overrideNumberOfReplicas) { assertMap(getIndexSettingsAsMap(followIndexName), matchesMap().extraOk().entry("index.number_of_replicas", "0")); } else { diff --git a/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle b/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle index 7661ea08b057d..ff342accef277 100644 --- a/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle +++ b/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle @@ -1,5 +1,9 @@ import org.elasticsearch.gradle.internal.test.RestIntegTestTask import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE +import org.elasticsearch.gradle.testclusters.TestClusterValueSource +import org.elasticsearch.gradle.testclusters.TestClustersPlugin +import org.elasticsearch.gradle.testclusters.TestClustersRegistry +import org.elasticsearch.gradle.util.GradleUtils apply plugin: 'elasticsearch.internal-testclusters' apply plugin: 'elasticsearch.standalone-rest-test' @@ -10,6 +14,8 @@ dependencies { testImplementation project(':x-pack:plugin:ccr:qa:') } +def clusterPath = getPath() + def leaderCluster = testClusters.register('leader-cluster') { testDistribution = 'DEFAULT' setting 'xpack.security.enabled', 'true' @@ -21,8 +27,20 @@ def followerCluster = testClusters.register('follow-cluster') { setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' user username: 'admin', password: 'admin-password', role: 'superuser' + + Provider serviceProvider = GradleUtils.getBuildService( + project.gradle.sharedServices, + TestClustersPlugin.REGISTRY_SERVICE_NAME + ) + def leaderInfo = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set("leader-cluster") + it.parameters.service = serviceProvider + } + def leaderUris = leaderInfo.map { it.getAllTransportPortURI() } + setting 'cluster.remote.leader_cluster.seeds', - { "\"${leaderCluster.get().getAllTransportPortURI().join(",")}\"" }, IGNORE_VALUE + { "\"${leaderUris.get().join(",")}\"" }, IGNORE_VALUE } tasks.register('leader-cluster', RestIntegTestTask) { @@ -34,7 +52,19 @@ tasks.register('follow-cluster', RestIntegTestTask) { dependsOn 'leader-cluster' useCluster leaderCluster systemProperty 'tests.target_cluster', 'follow' - nonInputProperties.systemProperty 'tests.leader_host', followerCluster.map(c -> c.allHttpSocketURI.get(0)) + + Provider serviceProvider = GradleUtils.getBuildService( + 
project.gradle.sharedServices, + TestClustersPlugin.REGISTRY_SERVICE_NAME + ) + def followInfo = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set("follow-cluster") + it.parameters.service = serviceProvider + } + def followUri = followInfo.map { it.allHttpSocketURI.get(0) } + + nonInputProperties.systemProperty 'tests.leader_host', followUri } tasks.named("check").configure { dependsOn "follow-cluster" } diff --git a/x-pack/plugin/ccr/qa/restart/build.gradle b/x-pack/plugin/ccr/qa/restart/build.gradle index 47d37801e2dcf..848beb1da10ae 100644 --- a/x-pack/plugin/ccr/qa/restart/build.gradle +++ b/x-pack/plugin/ccr/qa/restart/build.gradle @@ -1,6 +1,10 @@ import org.elasticsearch.gradle.internal.test.RestIntegTestTask import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE +import org.elasticsearch.gradle.testclusters.TestClusterValueSource +import org.elasticsearch.gradle.testclusters.TestClustersPlugin +import org.elasticsearch.gradle.testclusters.TestClustersRegistry +import org.elasticsearch.gradle.util.GradleUtils apply plugin: 'elasticsearch.internal-testclusters' apply plugin: 'elasticsearch.standalone-rest-test' @@ -9,6 +13,8 @@ dependencies { testImplementation project(':x-pack:plugin:ccr:qa') } +def clusterPath = getPath() + def leaderCluster = testClusters.register('leader-cluster') { testDistribution = 'DEFAULT' setting 'xpack.license.self_generated.type', 'trial' @@ -22,12 +28,23 @@ def followCluster = testClusters.register('follow-cluster') { setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' user username: 'admin', password: 'admin-password', role: 'superuser' + + Provider serviceProvider = GradleUtils.getBuildService( + project.gradle.sharedServices, + TestClustersPlugin.REGISTRY_SERVICE_NAME + ) + def leaderInfo = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set("leader-cluster") + it.parameters.service = serviceProvider + } + def leaderUri = leaderInfo.map { it.getAllTransportPortURI().get(0) } + setting 'cluster.remote.leader_cluster.seeds', - { "\"${leaderCluster.get().getAllTransportPortURI().get(0)}\"" }, IGNORE_VALUE + { "\"${leaderUri.get()}\"" }, IGNORE_VALUE nameCustomization = { 'follow' } } - tasks.register('leader-cluster', RestIntegTestTask) { mustRunAfter("precommit") systemProperty 'tests.target_cluster', 'leader' @@ -37,8 +54,19 @@ tasks.register('follow-cluster', RestIntegTestTask) { dependsOn 'leader-cluster' useCluster leaderCluster systemProperty 'tests.target_cluster', 'follow' + + Provider serviceProvider = GradleUtils.getBuildService( + project.gradle.sharedServices, + TestClustersPlugin.REGISTRY_SERVICE_NAME + ) + def leaderUri = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set("leader-cluster") + it.parameters.service = serviceProvider + }.map { it.allHttpSocketURI.get(0) } + nonInputProperties.systemProperty 'tests.leader_host', - "${-> leaderCluster.get().getAllHttpSocketURI().get(0)}" + "${-> leaderUri.get() }" } tasks.register("followClusterRestartTest", StandaloneRestIntegTestTask) { @@ -48,10 +76,27 @@ tasks.register("followClusterRestartTest", StandaloneRestIntegTestTask) { systemProperty 'tests.rest.load_packaged', 'false' systemProperty 'tests.target_cluster', 'follow-restart' + 
Provider serviceProvider = GradleUtils.getBuildService( + project.gradle.sharedServices, + TestClustersPlugin.REGISTRY_SERVICE_NAME + ) + def leaderUri = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set("leader-cluster") + it.parameters.service = serviceProvider + }.map { it.allHttpSocketURI.get(0) } + + def followUris = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set("follow-cluster") + it.parameters.service = serviceProvider + }.map { it.allHttpSocketURI.join(",") } + + nonInputProperties.systemProperty 'tests.leader_host', leaderUri + nonInputProperties.systemProperty 'tests.rest.cluster', followUris + doFirst { - followCluster.get().restart() - nonInputProperties.systemProperty 'tests.leader_host', leaderCluster.map(c-> c.getAllHttpSocketURI().get(0)) - nonInputProperties.systemProperty 'tests.rest.cluster', followCluster.map(c -> c.getAllHttpSocketURI().join(",")) + serviceProvider.get().restart(clusterPath, "follow-cluster") } } diff --git a/x-pack/plugin/ccr/qa/security/build.gradle b/x-pack/plugin/ccr/qa/security/build.gradle index 5515aefeaa091..454a9ae721736 100644 --- a/x-pack/plugin/ccr/qa/security/build.gradle +++ b/x-pack/plugin/ccr/qa/security/build.gradle @@ -1,4 +1,9 @@ import org.elasticsearch.gradle.internal.test.RestIntegTestTask +import org.elasticsearch.gradle.testclusters.TestClusterValueSource +import org.elasticsearch.gradle.testclusters.TestClustersPlugin +import org.elasticsearch.gradle.testclusters.TestClustersRegistry +import org.elasticsearch.gradle.util.GradleUtils + import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE apply plugin: 'elasticsearch.internal-testclusters' @@ -10,26 +15,38 @@ dependencies { testImplementation project(':x-pack:plugin:ccr:qa') } +def clusterPath = getPath() + def leadCluster = testClusters.register('leader-cluster') { - testDistribution = 'DEFAULT' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.enabled', 'true' - extraConfigFile 'roles.yml', file('leader-roles.yml') - user username: "test_admin", role: "superuser" - user username: "test_ccr", role: "ccruser" + testDistribution = 'DEFAULT' + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + extraConfigFile 'roles.yml', file('leader-roles.yml') + user username: "test_admin", role: "superuser" + user username: "test_ccr", role: "ccruser" } testClusters.register('follow-cluster') { - testDistribution = 'DEFAULT' - setting 'cluster.remote.leader_cluster.seeds', { - "\"${leadCluster.get().getAllTransportPortURI().join(",")}\"" - }, IGNORE_VALUE - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.enabled', 'true' - setting 'xpack.monitoring.collection.enabled', 'false' // will be enabled by tests - extraConfigFile 'roles.yml', file('follower-roles.yml') - user username: "test_admin", role: "superuser" - user username: "test_ccr", role: "ccruser" + testDistribution = 'DEFAULT' + Provider serviceProvider = GradleUtils.getBuildService( + project.gradle.sharedServices, + TestClustersPlugin.REGISTRY_SERVICE_NAME + ) + def leaderUris = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set("leader-cluster") + it.parameters.service = serviceProvider + }.map { it.AllTransportPortURI } + + setting 'cluster.remote.leader_cluster.seeds', { + 
"\"${leaderUris.get().join(",")}\"" + }, IGNORE_VALUE + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + setting 'xpack.monitoring.collection.enabled', 'false' // will be enabled by tests + extraConfigFile 'roles.yml', file('follower-roles.yml') + user username: "test_admin", role: "superuser" + user username: "test_ccr", role: "ccruser" } tasks.register('leader-cluster', RestIntegTestTask) { @@ -41,7 +58,17 @@ def followerClusterTestTask = tasks.register('follow-cluster', RestIntegTestTask dependsOn 'leader-cluster' useCluster leadCluster systemProperty 'tests.target_cluster', 'follow' - nonInputProperties.systemProperty 'tests.leader_host', leadCluster.map(c-> c.getAllHttpSocketURI().get(0)) + Provider serviceProvider = GradleUtils.getBuildService( + project.gradle.sharedServices, + TestClustersPlugin.REGISTRY_SERVICE_NAME + ) + def leaderUri = project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set("leader-cluster") + it.parameters.service = serviceProvider + }.map { it.allHttpSocketURI.get(0) } + + nonInputProperties.systemProperty 'tests.leader_host', leaderUri } tasks.named("check").configure { dependsOn(followerClusterTestTask) } diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index fb4acb0055a8c..51d770936e64e 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -1,7 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + import org.apache.tools.ant.filters.ReplaceTokens -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.Version - import java.nio.file.Paths apply plugin: 'elasticsearch.internal-es-plugin' @@ -65,7 +70,7 @@ dependencies { testImplementation project(path: ':modules:rest-root') testImplementation project(path: ':modules:health-shards-availability') // Needed for Fips140ProviderVerificationTests - testCompileOnly('org.bouncycastle:bc-fips:1.0.2.4') + testCompileOnly('org.bouncycastle:bc-fips:1.0.2.5') testImplementation(project(':x-pack:license-tools')) { transitive = false @@ -94,7 +99,7 @@ tasks.named("processResources").configure { String licenseKey = providers.systemProperty("license.key").getOrNull() if (licenseKey != null) { println "Using provided license key from ${licenseKey}" - } else if (BuildParams.isSnapshotBuild()) { + } else if (buildParams.isSnapshotBuild()) { licenseKey = Paths.get(project.projectDir.path, 'snapshot.key') } else { throw new IllegalArgumentException('Property license.key must be set for release build') @@ -155,13 +160,13 @@ testClusters.configureEach { requiresFeature 'es.failure_store_feature_flag_enabled', Version.fromString("8.15.0") } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.withType(Test).configureEach { systemProperty 'es.failure_store_feature_flag_enabled', 'true' } } -if (BuildParams.inFipsJvm) { +if (buildParams.inFipsJvm) { // Test clusters run with security disabled tasks.named("javaRestTest").configure { enabled = false } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensedFeature.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensedFeature.java index 56c8e87d1c502..d86c15aa14bc9 100644 
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensedFeature.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensedFeature.java @@ -136,11 +136,11 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; LicensedFeature that = (LicensedFeature) o; - return Objects.equals(name, that.name); + return Objects.equals(name, that.name) && Objects.equals(family, that.family); } @Override public int hashCode() { - return Objects.hash(name); + return Objects.hash(name, family); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiUsageTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiUsageTransportAction.java index 06393dfa3bade..155ea0ffdcbc3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiUsageTransportAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiUsageTransportAction.java @@ -13,8 +13,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.health.stats.HealthApiStatsAction; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.protocol.xpack.XPackUsageRequest; @@ -30,10 +28,7 @@ */ public class HealthApiUsageTransportAction extends XPackUsageFeatureTransportAction { - static final NodeFeature SUPPORTS_HEALTH_STATS = new NodeFeature("health.supports_health_stats"); - private final Client client; - private final FeatureService featureService; @Inject public HealthApiUsageTransportAction( @@ -42,8 +37,7 @@ public HealthApiUsageTransportAction( ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - Client client, - FeatureService featureService + Client client ) { super( XPackUsageFeatureAction.HEALTH.name(), @@ -54,7 +48,6 @@ public HealthApiUsageTransportAction( indexNameExpressionResolver ); this.client = client; - this.featureService = featureService; } @Override @@ -70,7 +63,7 @@ protected void masterOperation( client.threadPool().getThreadContext() ); - if (state.clusterRecovered() && featureService.clusterHasFeature(state, SUPPORTS_HEALTH_STATS)) { + if (state.clusterRecovered()) { HealthApiStatsAction.Request statsRequest = new HealthApiStatsAction.Request(); statsRequest.setParentTask(clusterService.localNode().getId(), task.getId()); client.execute(HealthApiStatsAction.INSTANCE, statsRequest, preservingListener.delegateFailureAndWrap((l, r) -> { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatures.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatures.java index b885a90c30e57..f966bf97f4764 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatures.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatures.java @@ -7,13 +7,11 @@ package org.elasticsearch.xpack.core; -import org.elasticsearch.Version; import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.license.License; import org.elasticsearch.xpack.core.datatiers.NodesDataTiersUsageTransportAction; -import java.util.Map; import java.util.Set; /** @@ -32,9 +30,4 @@ 
public Set getFeatures() { LOGSDB_TELMETRY_STATS ); } - - @Override - public Map getHistoricalFeatures() { - return Map.of(HealthApiUsageTransportAction.SUPPORTS_HEALTH_STATS, Version.V_8_7_0); - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java index 72e8805e96fc4..6aef618288fd2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java @@ -7,12 +7,16 @@ package org.elasticsearch.xpack.core; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.ssl.SslClientAuthenticationMode; import org.elasticsearch.common.ssl.SslVerificationMode; import org.elasticsearch.core.Strings; +import org.elasticsearch.plugins.Platforms; import org.elasticsearch.transport.RemoteClusterPortSettings; import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.security.authc.support.Hasher; @@ -26,6 +30,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.function.Function; import javax.crypto.SecretKeyFactory; @@ -40,6 +45,8 @@ */ public class XPackSettings { + private static final Logger logger = LogManager.getLogger(XPackSettings.class); + private XPackSettings() { throw new IllegalStateException("Utility class should not be instantiated"); } @@ -76,10 +83,21 @@ public Iterator> settings() { /** Setting for enabling or disabling graph. Defaults to true. */ public static final Setting GRAPH_ENABLED = Setting.boolSetting("xpack.graph.enabled", true, Setting.Property.NodeScope); - /** Setting for enabling or disabling machine learning. Defaults to true. */ + public static final Set ML_NATIVE_CODE_PLATFORMS = Set.of("darwin-aarch64", "linux-aarch64", "linux-x86_64", "windows-x86_64"); + + /** Setting for enabling or disabling machine learning. Defaults to true on platforms that have the ML native code available. */ public static final Setting MACHINE_LEARNING_ENABLED = Setting.boolSetting( "xpack.ml.enabled", - true, + ML_NATIVE_CODE_PLATFORMS.contains(Platforms.PLATFORM_NAME), + enabled -> { + if (enabled && ML_NATIVE_CODE_PLATFORMS.contains(Platforms.PLATFORM_NAME) == false) { + SettingsException e = new SettingsException("xpack.ml.enabled cannot be set to [true] on [{}]", Platforms.PLATFORM_NAME); + // The exception doesn't get logged nicely on the console because it's thrown during initial plugin loading, + // so log separately here to make absolutely clear what happened + logger.fatal(e.getMessage()); + throw e; + } + }, Setting.Property.NodeScope ); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractTransportSetUpgradeModeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractTransportSetUpgradeModeAction.java new file mode 100644 index 0000000000000..bbd90448cf855 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractTransportSetUpgradeModeAction.java @@ -0,0 +1,186 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.action; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.SimpleBatchedExecutor; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.service.MasterServiceTaskQueue; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.Strings; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.concurrent.atomic.AtomicBoolean; + +public abstract class AbstractTransportSetUpgradeModeAction extends AcknowledgedTransportMasterNodeAction { + + private static final Logger logger = LogManager.getLogger(AbstractTransportSetUpgradeModeAction.class); + private final AtomicBoolean isRunning = new AtomicBoolean(false); + private final MasterServiceTaskQueue taskQueue; + + public AbstractTransportSetUpgradeModeAction( + String actionName, + String taskQueuePrefix, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + actionName, + transportService, + clusterService, + threadPool, + actionFilters, + SetUpgradeModeActionRequest::new, + indexNameExpressionResolver, + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + + this.taskQueue = clusterService.createTaskQueue(taskQueuePrefix + " upgrade mode", Priority.NORMAL, new UpdateModeExecutor()); + } + + @Override + protected void masterOperation( + Task task, + SetUpgradeModeActionRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { + // Don't want folks spamming this endpoint while it is in progress, only allow one request to be handled at a time + if (isRunning.compareAndSet(false, true) == false) { + String msg = Strings.format( + "Attempted to set [upgrade_mode] for feature name [%s] to [%s] from [%s] while previous request was processing.", + featureName(), + request.enabled(), + upgradeMode(state) + ); + logger.info(msg); + Exception detail = new IllegalStateException(msg); + listener.onFailure( + new ElasticsearchStatusException( + "Cannot change [upgrade_mode] for feature name [{}]. 
Previous request is still being processed.", + RestStatus.TOO_MANY_REQUESTS, + detail, + featureName() + ) + ); + return; + } + + // Noop, nothing for us to do, simply return fast to the caller + var upgradeMode = upgradeMode(state); + if (request.enabled() == upgradeMode) { + logger.info("Upgrade mode noop"); + isRunning.set(false); + listener.onResponse(AcknowledgedResponse.TRUE); + return; + } + + logger.info( + "Starting to set [upgrade_mode] for feature name [{}] to [{}] from [{}]", + featureName(), + request.enabled(), + upgradeMode + ); + + ActionListener wrappedListener = ActionListener.wrap(r -> { + logger.info("Finished setting [upgrade_mode] for feature name [{}]", featureName()); + isRunning.set(false); + listener.onResponse(r); + }, e -> { + logger.info("Failed to set [upgrade_mode] for feature name [{}]", featureName()); + isRunning.set(false); + listener.onFailure(e); + }); + + ActionListener setUpgradeModeListener = wrappedListener.delegateFailure((delegate, ack) -> { + if (ack.isAcknowledged()) { + upgradeModeSuccessfullyChanged(task, request, state, delegate); + } else { + logger.info("Cluster state update is NOT acknowledged"); + wrappedListener.onFailure(new ElasticsearchTimeoutException("Unknown error occurred while updating cluster state")); + } + }); + + taskQueue.submitTask(featureName(), new UpdateModeStateListener(request, setUpgradeModeListener), request.ackTimeout()); + } + + /** + * Define the feature name, used in log messages and naming the task on the task queue. + */ + protected abstract String featureName(); + + /** + * Parse the ClusterState for the implementation's {@link org.elasticsearch.cluster.metadata.Metadata.Custom} and find the upgradeMode + * boolean stored there. We will compare this boolean with the request's desired state to determine if we should change the metadata. + */ + protected abstract boolean upgradeMode(ClusterState state); + + /** + * This is called from the ClusterState updater and is expected to return quickly. + */ + protected abstract ClusterState createUpdatedState(SetUpgradeModeActionRequest request, ClusterState state); + + /** + * This method is only called when the cluster state was successfully changed. + * If we failed to update for any reason, this will not be called. + * The ClusterState param is the previous ClusterState before we called update. 
+ */ + protected abstract void upgradeModeSuccessfullyChanged( + Task task, + SetUpgradeModeActionRequest request, + ClusterState state, + ActionListener listener + ); + + @Override + protected ClusterBlockException checkBlock(SetUpgradeModeActionRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + private record UpdateModeStateListener(SetUpgradeModeActionRequest request, ActionListener listener) + implements + ClusterStateTaskListener { + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + } + + private class UpdateModeExecutor extends SimpleBatchedExecutor { + @Override + public Tuple executeTask(UpdateModeStateListener clusterStateListener, ClusterState clusterState) { + return Tuple.tuple(createUpdatedState(clusterStateListener.request(), clusterState), null); + } + + @Override + public void taskSucceeded(UpdateModeStateListener clusterStateListener, Void unused) { + clusterStateListener.listener().onResponse(AcknowledgedResponse.TRUE); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/SetUpgradeModeActionRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/SetUpgradeModeActionRequest.java new file mode 100644 index 0000000000000..98e30b284c21a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/SetUpgradeModeActionRequest.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.action; + +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public class SetUpgradeModeActionRequest extends AcknowledgedRequest implements ToXContentObject { + + private final boolean enabled; + + private static final ParseField ENABLED = new ParseField("enabled"); + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "set_upgrade_mode_action_request", + a -> new SetUpgradeModeActionRequest((Boolean) a[0]) + ); + + static { + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), ENABLED); + } + + public SetUpgradeModeActionRequest(boolean enabled) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + this.enabled = enabled; + } + + public SetUpgradeModeActionRequest(StreamInput in) throws IOException { + super(in); + this.enabled = in.readBoolean(); + } + + public boolean enabled() { + return enabled; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(enabled); + } + + @Override + public int hashCode() { + return Objects.hash(enabled); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || obj.getClass() != getClass()) { + return false; + } + SetUpgradeModeActionRequest other = (SetUpgradeModeActionRequest) obj; + return enabled == 
other.enabled(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ENABLED.getPreferredName(), enabled); + builder.endObject(); + return builder; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/LogsDBFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/LogsDBFeatureSetUsage.java index 2758ef73a98da..b32e95c5fc9d8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/LogsDBFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/LogsDBFeatureSetUsage.java @@ -22,6 +22,7 @@ public final class LogsDBFeatureSetUsage extends XPackFeatureUsage { private final int indicesWithSyntheticSource; private final long numDocs; private final long sizeInBytes; + private final boolean hasCustomCutoffDate; public LogsDBFeatureSetUsage(StreamInput input) throws IOException { super(input); @@ -34,6 +35,13 @@ public LogsDBFeatureSetUsage(StreamInput input) throws IOException { numDocs = 0; sizeInBytes = 0; } + var transportVersion = input.getTransportVersion(); + if (transportVersion.isPatchFrom(TransportVersions.LOGSDB_TELEMETRY_CUSTOM_CUTOFF_DATE_FIX_8_17) + || transportVersion.onOrAfter(TransportVersions.LOGSDB_TELEMETRY_CUSTOM_CUTOFF_DATE)) { + hasCustomCutoffDate = input.readBoolean(); + } else { + hasCustomCutoffDate = false; + } } @Override @@ -45,6 +53,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(numDocs); out.writeVLong(sizeInBytes); } + var transportVersion = out.getTransportVersion(); + if (transportVersion.isPatchFrom(TransportVersions.LOGSDB_TELEMETRY_CUSTOM_CUTOFF_DATE_FIX_8_17) + || transportVersion.onOrAfter(TransportVersions.LOGSDB_TELEMETRY_CUSTOM_CUTOFF_DATE)) { + out.writeBoolean(hasCustomCutoffDate); + } } public LogsDBFeatureSetUsage( @@ -53,13 +66,15 @@ public LogsDBFeatureSetUsage( int indicesCount, int indicesWithSyntheticSource, long numDocs, - long sizeInBytes + long sizeInBytes, + boolean hasCustomCutoffDate ) { super(XPackField.LOGSDB, available, enabled); this.indicesCount = indicesCount; this.indicesWithSyntheticSource = indicesWithSyntheticSource; this.numDocs = numDocs; this.sizeInBytes = sizeInBytes; + this.hasCustomCutoffDate = hasCustomCutoffDate; } @Override @@ -74,11 +89,12 @@ protected void innerXContent(XContentBuilder builder, Params params) throws IOEx builder.field("indices_with_synthetic_source", indicesWithSyntheticSource); builder.field("num_docs", numDocs); builder.field("size_in_bytes", sizeInBytes); + builder.field("has_custom_cutoff_date", hasCustomCutoffDate); } @Override public int hashCode() { - return Objects.hash(available, enabled, indicesCount, indicesWithSyntheticSource, numDocs, sizeInBytes); + return Objects.hash(available, enabled, indicesCount, indicesWithSyntheticSource, numDocs, sizeInBytes, hasCustomCutoffDate); } @Override @@ -95,6 +111,7 @@ public boolean equals(Object obj) { && Objects.equals(indicesCount, other.indicesCount) && Objects.equals(indicesWithSyntheticSource, other.indicesWithSyntheticSource) && Objects.equals(numDocs, other.numDocs) - && Objects.equals(sizeInBytes, other.sizeInBytes); + && Objects.equals(sizeInBytes, other.sizeInBytes) + && Objects.equals(hasCustomCutoffDate, other.hasCustomCutoffDate); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/time/RemainingTime.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/time/RemainingTime.java index 33a3f2424c90c..4772277ae2375 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/time/RemainingTime.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/time/RemainingTime.java @@ -18,8 +18,13 @@ public interface RemainingTime extends Supplier { * Create a {@link Supplier} that returns a decreasing {@link TimeValue} on each invocation, representing the amount of time until * the call times out. The timer starts when this method is called and counts down from remainingTime to 0. * currentTime should return the most up-to-date system time, for example Instant.now() or Clock.instant(). + * {@link TimeValue#MAX_VALUE} is a special case where the remaining time is always TimeValue.MAX_VALUE. */ static RemainingTime from(Supplier currentTime, TimeValue remainingTime) { + if (remainingTime.equals(TimeValue.MAX_VALUE)) { + return () -> TimeValue.MAX_VALUE; + } + var timeout = currentTime.get().plus(remainingTime.duration(), remainingTime.timeUnit().toChronoUnit()); var maxRemainingTime = remainingTime.nanos(); return () -> { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStep.java index cd44aaafbfae2..05eb7551330b2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStep.java @@ -130,11 +130,11 @@ public boolean equals(Object obj) { * still result in unique snapshot names. */ public static String generateSnapshotName(String name) { - return generateSnapshotName(name, new IndexNameExpressionResolver.ResolverContext()); + return generateSnapshotName(name, System.currentTimeMillis()); } - public static String generateSnapshotName(String name, IndexNameExpressionResolver.Context context) { - String candidate = IndexNameExpressionResolver.resolveDateMathExpression(name, context.getStartTime()); + public static String generateSnapshotName(String name, long now) { + String candidate = IndexNameExpressionResolver.resolveDateMathExpression(name, now); // TODO: we are breaking the rules of UUIDs by lowercasing this here, find an alternative (snapshot names must be lowercase) return candidate + "-" + UUIDs.randomBase64UUID().toLowerCase(Locale.ROOT); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java index f8cb371687d72..26f4f5c92073c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java @@ -58,10 +58,22 @@ public class IndexLifecycleMetadata implements Metadata.Custom { private final Map policyMetadatas; private final OperationMode operationMode; + // a slightly different view of the policyMetadatas -- it's hot in a couple of places so we pre-calculate it + private final Map policies; + + private static Map policiesMap(final Map policyMetadatas) { + final Map policies = new HashMap<>(policyMetadatas.size()); + for (LifecyclePolicyMetadata policyMetadata : policyMetadatas.values()) { + LifecyclePolicy policy = policyMetadata.getPolicy(); + 
policies.put(policy.getName(), policy); + } + return Collections.unmodifiableMap(policies); + } public IndexLifecycleMetadata(Map policies, OperationMode operationMode) { this.policyMetadatas = Collections.unmodifiableMap(policies); this.operationMode = operationMode; + this.policies = policiesMap(policyMetadatas); } public IndexLifecycleMetadata(StreamInput in) throws IOException { @@ -72,6 +84,7 @@ public IndexLifecycleMetadata(StreamInput in) throws IOException { } this.policyMetadatas = policies; this.operationMode = in.readEnum(OperationMode.class); + this.policies = policiesMap(policyMetadatas); } @Override @@ -93,13 +106,7 @@ public OperationMode getOperationMode() { } public Map getPolicies() { - // note: this loop is unrolled rather than streaming-style because it's hot enough to show up in a flamegraph - Map policies = new HashMap<>(policyMetadatas.size()); - for (LifecyclePolicyMetadata policyMetadata : policyMetadatas.values()) { - LifecyclePolicy policy = policyMetadata.getPolicy(); - policies.put(policy.getName(), policy); - } - return Collections.unmodifiableMap(policies); + return policies; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java index 821caf001f3e0..a67ae33e85801 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java @@ -7,17 +7,13 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.action.SetUpgradeModeActionRequest; import java.io.IOException; -import java.util.Objects; public class SetUpgradeModeAction extends ActionType { @@ -28,9 +24,7 @@ private SetUpgradeModeAction() { super(NAME); } - public static class Request extends AcknowledgedRequest implements ToXContentObject { - - private final boolean enabled; + public static class Request extends SetUpgradeModeActionRequest { private static final ParseField ENABLED = new ParseField("enabled"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( @@ -43,48 +37,11 @@ public static class Request extends AcknowledgedRequest implements ToXC } public Request(boolean enabled) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); - this.enabled = enabled; + super(enabled); } public Request(StreamInput in) throws IOException { super(in); - this.enabled = in.readBoolean(); - } - - public boolean isEnabled() { - return enabled; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBoolean(enabled); - } - - @Override - public int hashCode() { - return Objects.hash(enabled); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null || obj.getClass() != getClass()) { - return false; - } - Request 
other = (Request) obj; - return Objects.equals(enabled, other.enabled); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(ENABLED.getPreferredName(), enabled); - builder.endObject(); - return builder; } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfig.java index f4ac89124cddb..68e0f7e1ac885 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfig.java @@ -76,12 +76,6 @@ public TextExpansionConfig( this.vocabularyConfig = Optional.ofNullable(vocabularyConfig) .orElse(new VocabularyConfig(InferenceIndexConstants.nativeDefinitionStore())); this.tokenization = tokenization == null ? Tokenization.createDefault() : tokenization; - if (this.tokenization instanceof BertTokenization == false) { - throw ExceptionsHelper.badRequestException( - "text expansion models must be configured with BERT tokenizer, [{}] given", - this.tokenization.getName() - ); - } this.resultsField = resultsField; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java index 83f7832645270..ad8a55a5f8443 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java @@ -51,7 +51,7 @@ public static int countInferenceProcessors(ClusterState state) { } Counter counter = Counter.newCounter(); ingestMetadata.getPipelines().forEach((pipelineId, configuration) -> { - Map configMap = configuration.getConfigAsMap(); + Map configMap = configuration.getConfig(); List> processorConfigs = (List>) configMap.get(PROCESSORS_KEY); for (Map processorConfigWithKey : processorConfigs) { for (Map.Entry entry : processorConfigWithKey.entrySet()) { @@ -73,7 +73,7 @@ public static Set getModelIdsFromInferenceProcessors(IngestMetadata inge Set modelIds = new LinkedHashSet<>(); ingestMetadata.getPipelines().forEach((pipelineId, configuration) -> { - Map configMap = configuration.getConfigAsMap(); + Map configMap = configuration.getConfig(); List> processorConfigs = readList(configMap, PROCESSORS_KEY); for (Map processorConfigWithKey : processorConfigs) { for (Map.Entry entry : processorConfigWithKey.entrySet()) { @@ -100,7 +100,7 @@ public static Map> pipelineIdsByResource(ClusterState state, return pipelineIdsByModelIds; } ingestMetadata.getPipelines().forEach((pipelineId, configuration) -> { - Map configMap = configuration.getConfigAsMap(); + Map configMap = configuration.getConfig(); List> processorConfigs = readList(configMap, PROCESSORS_KEY); for (Map processorConfigWithKey : processorConfigs) { for (Map.Entry entry : processorConfigWithKey.entrySet()) { @@ -131,7 +131,7 @@ public static Set pipelineIdsForResource(ClusterState state, Set return pipelineIds; } ingestMetadata.getPipelines().forEach((pipelineId, configuration) -> { - Map configMap = configuration.getConfigAsMap(); + Map configMap = configuration.getConfig(); List> 
processorConfigs = readList(configMap, PROCESSORS_KEY); for (Map processorConfigWithKey : processorConfigs) { for (Map.Entry entry : processorConfigWithKey.entrySet()) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java index b9d39aa665848..c94b90b6c0c23 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java @@ -8,11 +8,15 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.time.LocalDateTime; +import java.time.ZoneId; import java.time.ZoneOffset; +import java.time.temporal.ChronoField; import java.util.Calendar; import java.util.Iterator; import java.util.Locale; @@ -232,6 +236,8 @@ public class Cron implements ToXContentFragment { private final String expression; + private ZoneId timeZone; + private transient TreeSet seconds; private transient TreeSet minutes; private transient TreeSet hours; @@ -246,7 +252,20 @@ public class Cron implements ToXContentFragment { private transient boolean nearestWeekday = false; private transient int lastdayOffset = 0; - public static final int MAX_YEAR = Calendar.getInstance(UTC, Locale.ROOT).get(Calendar.YEAR) + 100; + // Restricted to 50 years as the tzdb only has correct DST transition information for countries using a lunar calendar + // for the next ~60 years + public static final int MAX_YEAR = Calendar.getInstance(UTC, Locale.ROOT).get(Calendar.YEAR) + 50; + + public Cron(String expression, ZoneId timeZone) { + this.timeZone = timeZone; + assert expression != null : "cron expression cannot be null"; + this.expression = expression.toUpperCase(Locale.ROOT); + try { + buildExpression(this.expression); + } catch (Exception e) { + throw illegalArgument("invalid cron expression [{}]", e, expression); + } + } /** * Constructs a new CronExpression based on the specified @@ -259,13 +278,7 @@ public class Cron implements ToXContentFragment { * CronExpression */ public Cron(String expression) { - assert expression != null : "cron expression cannot be null"; - this.expression = expression.toUpperCase(Locale.ROOT); - try { - buildExpression(this.expression); - } catch (Exception e) { - throw illegalArgument("invalid cron expression [{}]", e, expression); - } + this(expression, UTC.toZoneId()); } /** @@ -275,7 +288,11 @@ public Cron(String expression) { * @param cron The existing cron expression to be copied */ public Cron(Cron cron) { - this(cron.expression); + this(cron.expression, cron.timeZone); + } + + public void setTimeZone(ZoneId timeZone) { + this.timeZone = timeZone; } /** @@ -286,31 +303,25 @@ public Cron(Cron cron) { * a time that is previous to the given time) * @return the next valid time (since the epoch) */ + @SuppressForbidden(reason = "In this case, the DST ambiguity of the atZone method is desired, understood and tested") public long getNextValidTimeAfter(final long time) { - // Computation is based on Gregorian year only. 
- Calendar cl = new java.util.GregorianCalendar(UTC, Locale.ROOT); - - // move ahead one second, since we're computing the time *after* the - // given time - final long afterTime = time + 1000; - // CronTrigger does not deal with milliseconds - cl.setTimeInMillis(afterTime); - cl.set(Calendar.MILLISECOND, 0); + LocalDateTime afterTimeLdt = LocalDateTime.ofInstant(java.time.Instant.ofEpochMilli(time), timeZone).plusSeconds(1); + LocalDateTimeLegacyWrapper cl = new LocalDateTimeLegacyWrapper(afterTimeLdt.with(ChronoField.MILLI_OF_SECOND, 0)); boolean gotOne = false; // loop until we've computed the next time, or we've past the endTime while (gotOne == false) { - if (cl.get(Calendar.YEAR) > 2999) { // prevent endless loop... + if (cl.getYear() > 2999) { // prevent endless loop... return -1; } SortedSet st = null; int t = 0; - int sec = cl.get(Calendar.SECOND); - int min = cl.get(Calendar.MINUTE); + int sec = cl.getSecond(); + int min = cl.getMinute(); // get second................................................. st = seconds.tailSet(sec); @@ -319,12 +330,12 @@ public long getNextValidTimeAfter(final long time) { } else { sec = seconds.first(); min++; - cl.set(Calendar.MINUTE, min); + cl.setMinute(min); } - cl.set(Calendar.SECOND, sec); + cl.setSecond(sec); - min = cl.get(Calendar.MINUTE); - int hr = cl.get(Calendar.HOUR_OF_DAY); + min = cl.getMinute(); + int hr = cl.getHour(); t = -1; // get minute................................................. @@ -337,15 +348,15 @@ public long getNextValidTimeAfter(final long time) { hr++; } if (min != t) { - cl.set(Calendar.SECOND, 0); - cl.set(Calendar.MINUTE, min); - setCalendarHour(cl, hr); + cl.setSecond(0); + cl.setMinute(min); + cl.setHour(hr); continue; } - cl.set(Calendar.MINUTE, min); + cl.setMinute(min); - hr = cl.get(Calendar.HOUR_OF_DAY); - int day = cl.get(Calendar.DAY_OF_MONTH); + hr = cl.getHour(); + int day = cl.getDayOfMonth(); t = -1; // get hour................................................... 
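The Cron changes above and below replace GregorianCalendar arithmetic pinned to UTC with LocalDateTime field arithmetic in a configurable ZoneId (via the LocalDateTimeLegacyWrapper adapter, defined outside this hunk), converting back to an instant only at the end so that atZone() resolves daylight-saving gaps and overlaps; the @SuppressForbidden annotation marks that ambiguity as deliberate. A minimal sketch of the approach, assuming a fixed 02:30 wall-clock schedule instead of a full cron expression:

import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;

// Illustrative only: roll a local wall-clock time forward to the next 02:30
// in the given zone, converting to an epoch instant only at the end.
final class NextFireTime {
    static long nextHalfPastTwo(long afterMillis, ZoneId zone) {
        LocalDateTime t = LocalDateTime
            .ofInstant(Instant.ofEpochMilli(afterMillis), zone)
            .plusSeconds(1)   // strictly after the given time
            .withNano(0);
        LocalDateTime candidate = t.withHour(2).withMinute(30).withSecond(0);
        if (!candidate.isAfter(t)) {
            candidate = candidate.plusDays(1);
        }
        // On a spring-forward day 02:30 may not exist: atZone() shifts the
        // time past the gap. During fall-back overlaps it picks the earlier
        // offset. Doing the arithmetic in UTC would miss both cases.
        return candidate.atZone(zone).toInstant().toEpochMilli();
    }
}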
@@ -358,16 +369,16 @@ public long getNextValidTimeAfter(final long time) { day++; } if (hr != t) { - cl.set(Calendar.SECOND, 0); - cl.set(Calendar.MINUTE, 0); - cl.set(Calendar.DAY_OF_MONTH, day); - setCalendarHour(cl, hr); + cl.setSecond(0); + cl.setMinute(0); + cl.setDayOfMonth(day); + cl.setHour(hr); continue; } - cl.set(Calendar.HOUR_OF_DAY, hr); + cl.setHour(hr); - day = cl.get(Calendar.DAY_OF_MONTH); - int mon = cl.get(Calendar.MONTH) + 1; + day = cl.getDayOfMonth(); + int mon = cl.getMonth() + 1; // '+ 1' because calendar is 0-based for this field, and we are // 1-based t = -1; @@ -381,32 +392,32 @@ public long getNextValidTimeAfter(final long time) { if (lastdayOfMonth) { if (nearestWeekday == false) { t = day; - day = getLastDayOfMonth(mon, cl.get(Calendar.YEAR)); + day = getLastDayOfMonth(mon, cl.getYear()); day -= lastdayOffset; if (t > day) { mon++; if (mon > 12) { mon = 1; tmon = 3333; // ensure test of mon != tmon further below fails - cl.add(Calendar.YEAR, 1); + cl.plusYears(1); } day = 1; } } else { t = day; - day = getLastDayOfMonth(mon, cl.get(Calendar.YEAR)); + day = getLastDayOfMonth(mon, cl.getYear()); day -= lastdayOffset; - Calendar tcal = Calendar.getInstance(UTC, Locale.ROOT); - tcal.set(Calendar.SECOND, 0); - tcal.set(Calendar.MINUTE, 0); - tcal.set(Calendar.HOUR_OF_DAY, 0); - tcal.set(Calendar.DAY_OF_MONTH, day); - tcal.set(Calendar.MONTH, mon - 1); - tcal.set(Calendar.YEAR, cl.get(Calendar.YEAR)); + LocalDateTimeLegacyWrapper tcal = new LocalDateTimeLegacyWrapper(LocalDateTime.now(timeZone)); + tcal.setSecond(0); + tcal.setMinute(0); + tcal.setHour(0); + tcal.setDayOfMonth(day); + tcal.setMonth(mon - 1); + tcal.setYear(cl.getYear()); - int ldom = getLastDayOfMonth(mon, cl.get(Calendar.YEAR)); - int dow = tcal.get(Calendar.DAY_OF_WEEK); + int ldom = getLastDayOfMonth(mon, cl.getYear()); + int dow = tcal.getDayOfWeek(); if (dow == Calendar.SATURDAY && day == 1) { day += 2; @@ -418,13 +429,12 @@ public long getNextValidTimeAfter(final long time) { day += 1; } - tcal.set(Calendar.SECOND, sec); - tcal.set(Calendar.MINUTE, min); - tcal.set(Calendar.HOUR_OF_DAY, hr); - tcal.set(Calendar.DAY_OF_MONTH, day); - tcal.set(Calendar.MONTH, mon - 1); - long nTime = tcal.getTimeInMillis(); - if (nTime < afterTime) { + tcal.setSecond(sec); + tcal.setMinute(min); + tcal.setHour(hr); + tcal.setDayOfMonth(day); + tcal.setMonth(mon - 1); + if (tcal.isBefore(afterTimeLdt)) { day = 1; mon++; } @@ -433,16 +443,16 @@ public long getNextValidTimeAfter(final long time) { t = day; day = daysOfMonth.first(); - Calendar tcal = Calendar.getInstance(UTC, Locale.ROOT); - tcal.set(Calendar.SECOND, 0); - tcal.set(Calendar.MINUTE, 0); - tcal.set(Calendar.HOUR_OF_DAY, 0); - tcal.set(Calendar.DAY_OF_MONTH, day); - tcal.set(Calendar.MONTH, mon - 1); - tcal.set(Calendar.YEAR, cl.get(Calendar.YEAR)); + LocalDateTimeLegacyWrapper tcal = new LocalDateTimeLegacyWrapper(LocalDateTime.now(timeZone)); + tcal.setSecond(0); + tcal.setMinute(0); + tcal.setHour(0); + tcal.setDayOfMonth(day); + tcal.setMonth(mon - 1); + tcal.setYear(cl.getYear()); - int ldom = getLastDayOfMonth(mon, cl.get(Calendar.YEAR)); - int dow = tcal.get(Calendar.DAY_OF_WEEK); + int ldom = getLastDayOfMonth(mon, cl.getYear()); + int dow = tcal.getDayOfWeek(); if (dow == Calendar.SATURDAY && day == 1) { day += 2; @@ -454,13 +464,12 @@ public long getNextValidTimeAfter(final long time) { day += 1; } - tcal.set(Calendar.SECOND, sec); - tcal.set(Calendar.MINUTE, min); - tcal.set(Calendar.HOUR_OF_DAY, hr); - tcal.set(Calendar.DAY_OF_MONTH, day); 
- tcal.set(Calendar.MONTH, mon - 1); - long nTime = tcal.getTimeInMillis(); - if (nTime < afterTime) { + tcal.setSecond(sec); + tcal.setMinute(min); + tcal.setHour(hr); + tcal.setDayOfMonth(day); + tcal.setMonth(mon - 1); + if (tcal.isAfter(afterTimeLdt)) { day = daysOfMonth.first(); mon++; } @@ -468,7 +477,7 @@ public long getNextValidTimeAfter(final long time) { t = day; day = st.first(); // make sure we don't over-run a short month, such as february - int lastDay = getLastDayOfMonth(mon, cl.get(Calendar.YEAR)); + int lastDay = getLastDayOfMonth(mon, cl.getYear()); if (day > lastDay) { day = daysOfMonth.first(); mon++; @@ -479,11 +488,11 @@ public long getNextValidTimeAfter(final long time) { } if (day != t || mon != tmon) { - cl.set(Calendar.SECOND, 0); - cl.set(Calendar.MINUTE, 0); - cl.set(Calendar.HOUR_OF_DAY, 0); - cl.set(Calendar.DAY_OF_MONTH, day); - cl.set(Calendar.MONTH, mon - 1); + cl.setSecond(0); + cl.setMinute(0); + cl.setHour(0); + cl.setDayOfMonth(day); + cl.setMonth(mon - 1); // '- 1' because calendar is 0-based for this field, and we // are 1-based continue; @@ -493,7 +502,7 @@ public long getNextValidTimeAfter(final long time) { // the month? int dow = daysOfWeek.first(); // desired // d-o-w - int cDow = cl.get(Calendar.DAY_OF_WEEK); // current d-o-w + int cDow = cl.getDayOfWeek(); // current d-o-w int daysToAdd = 0; if (cDow < dow) { daysToAdd = dow - cDow; @@ -502,15 +511,15 @@ public long getNextValidTimeAfter(final long time) { daysToAdd = dow + (7 - cDow); } - int lDay = getLastDayOfMonth(mon, cl.get(Calendar.YEAR)); + int lDay = getLastDayOfMonth(mon, cl.getYear()); if (day + daysToAdd > lDay) { // did we already miss the // last one? - cl.set(Calendar.SECOND, 0); - cl.set(Calendar.MINUTE, 0); - cl.set(Calendar.HOUR_OF_DAY, 0); - cl.set(Calendar.DAY_OF_MONTH, 1); - cl.set(Calendar.MONTH, mon); + cl.setSecond(0); + cl.setMinute(0); + cl.setHour(0); + cl.setDayOfMonth(1); + cl.setMonth(mon); // no '- 1' here because we are promoting the month continue; } @@ -523,11 +532,11 @@ public long getNextValidTimeAfter(final long time) { day += daysToAdd; if (daysToAdd > 0) { - cl.set(Calendar.SECOND, 0); - cl.set(Calendar.MINUTE, 0); - cl.set(Calendar.HOUR_OF_DAY, 0); - cl.set(Calendar.DAY_OF_MONTH, day); - cl.set(Calendar.MONTH, mon - 1); + cl.setSecond(0); + cl.setMinute(0); + cl.setHour(0); + cl.setDayOfMonth(day); + cl.setMonth(mon - 1); // '- 1' here because we are not promoting the month continue; } @@ -536,7 +545,7 @@ public long getNextValidTimeAfter(final long time) { // are we looking for the Nth XXX day in the month? 
int dow = daysOfWeek.first(); // desired // d-o-w - int cDow = cl.get(Calendar.DAY_OF_WEEK); // current d-o-w + int cDow = cl.getDayOfWeek(); // current d-o-w int daysToAdd = 0; if (cDow < dow) { daysToAdd = dow - cDow; @@ -557,25 +566,25 @@ public long getNextValidTimeAfter(final long time) { daysToAdd = (nthdayOfWeek - weekOfMonth) * 7; day += daysToAdd; - if (daysToAdd < 0 || day > getLastDayOfMonth(mon, cl.get(Calendar.YEAR))) { - cl.set(Calendar.SECOND, 0); - cl.set(Calendar.MINUTE, 0); - cl.set(Calendar.HOUR_OF_DAY, 0); - cl.set(Calendar.DAY_OF_MONTH, 1); - cl.set(Calendar.MONTH, mon); + if (daysToAdd < 0 || day > getLastDayOfMonth(mon, cl.getYear())) { + cl.setSecond(0); + cl.setMinute(0); + cl.setHour(0); + cl.setDayOfMonth(1); + cl.setMonth(mon); // no '- 1' here because we are promoting the month continue; } else if (daysToAdd > 0 || dayShifted) { - cl.set(Calendar.SECOND, 0); - cl.set(Calendar.MINUTE, 0); - cl.set(Calendar.HOUR_OF_DAY, 0); - cl.set(Calendar.DAY_OF_MONTH, day); - cl.set(Calendar.MONTH, mon - 1); + cl.setSecond(0); + cl.setMinute(0); + cl.setHour(0); + cl.setDayOfMonth(day); + cl.setMonth(mon - 1); // '- 1' here because we are NOT promoting the month continue; } } else { - int cDow = cl.get(Calendar.DAY_OF_WEEK); // current d-o-w + int cDow = cl.getDayOfWeek(); // current d-o-w int dow = daysOfWeek.first(); // desired // d-o-w st = daysOfWeek.tailSet(cDow); @@ -591,23 +600,23 @@ public long getNextValidTimeAfter(final long time) { daysToAdd = dow + (7 - cDow); } - int lDay = getLastDayOfMonth(mon, cl.get(Calendar.YEAR)); + int lDay = getLastDayOfMonth(mon, cl.getYear()); if (day + daysToAdd > lDay) { // will we pass the end of // the month? - cl.set(Calendar.SECOND, 0); - cl.set(Calendar.MINUTE, 0); - cl.set(Calendar.HOUR_OF_DAY, 0); - cl.set(Calendar.DAY_OF_MONTH, 1); - cl.set(Calendar.MONTH, mon); + cl.setSecond(0); + cl.setMinute(0); + cl.setHour(0); + cl.setDayOfMonth(1); + cl.setMonth(mon); // no '- 1' here because we are promoting the month continue; } else if (daysToAdd > 0) { // are we swithing days? 
- cl.set(Calendar.SECOND, 0); - cl.set(Calendar.MINUTE, 0); - cl.set(Calendar.HOUR_OF_DAY, 0); - cl.set(Calendar.DAY_OF_MONTH, day + daysToAdd); - cl.set(Calendar.MONTH, mon - 1); + cl.setSecond(0); + cl.setMinute(0); + cl.setHour(0); + cl.setDayOfMonth(day + daysToAdd); + cl.setMonth(mon - 1); // '- 1' because calendar is 0-based for this field, // and we are 1-based continue; @@ -618,12 +627,12 @@ public long getNextValidTimeAfter(final long time) { // throw new UnsupportedOperationException( // "Support for specifying both a day-of-week AND a day-of-month parameter is not implemented."); } - cl.set(Calendar.DAY_OF_MONTH, day); + cl.setDayOfMonth(day); - mon = cl.get(Calendar.MONTH) + 1; + mon = cl.getMonth() + 1; // '+ 1' because calendar is 0-based for this field, and we are // 1-based - int year = cl.get(Calendar.YEAR); + int year = cl.getYear(); t = -1; // test for expressions that never generate a valid fire date, @@ -643,21 +652,21 @@ public long getNextValidTimeAfter(final long time) { year++; } if (mon != t) { - cl.set(Calendar.SECOND, 0); - cl.set(Calendar.MINUTE, 0); - cl.set(Calendar.HOUR_OF_DAY, 0); - cl.set(Calendar.DAY_OF_MONTH, 1); - cl.set(Calendar.MONTH, mon - 1); + cl.setSecond(0); + cl.setMinute(0); + cl.setHour(0); + cl.setDayOfMonth(1); + cl.setMonth(mon - 1); // '- 1' because calendar is 0-based for this field, and we are // 1-based - cl.set(Calendar.YEAR, year); + cl.setYear(year); continue; } - cl.set(Calendar.MONTH, mon - 1); + cl.setMonth(mon - 1); // '- 1' because calendar is 0-based for this field, and we are // 1-based - year = cl.get(Calendar.YEAR); + year = cl.getYear(); t = -1; // get year................................................... @@ -671,22 +680,24 @@ public long getNextValidTimeAfter(final long time) { } if (year != t) { - cl.set(Calendar.SECOND, 0); - cl.set(Calendar.MINUTE, 0); - cl.set(Calendar.HOUR_OF_DAY, 0); - cl.set(Calendar.DAY_OF_MONTH, 1); - cl.set(Calendar.MONTH, 0); + cl.setSecond(0); + cl.setMinute(0); + cl.setHour(0); + cl.setDayOfMonth(1); + cl.setMonth(0); // '- 1' because calendar is 0-based for this field, and we are // 1-based - cl.set(Calendar.YEAR, year); + cl.setYear(year); continue; } - cl.set(Calendar.YEAR, year); + cl.setYear(year); gotOne = true; } // while( done == false ) - return cl.getTimeInMillis(); + LocalDateTime nextRuntime = cl.getLocalDateTime(); + + return nextRuntime.atZone(timeZone).toInstant().toEpochMilli(); } public String expression() { @@ -735,7 +746,7 @@ public String getExpressionSummary() { @Override public int hashCode() { - return Objects.hash(expression); + return Objects.hash(expression, timeZone); } @Override @@ -747,7 +758,7 @@ public boolean equals(Object obj) { return false; } final Cron other = (Cron) obj; - return Objects.equals(this.expression, other.expression); + return Objects.equals(this.expression, other.expression) && Objects.equals(this.timeZone, other.timeZone); } /** @@ -757,7 +768,7 @@ public boolean equals(Object obj) { */ @Override public String toString() { - return expression; + return "Cron{" + "timeZone=" + timeZone + ", expression='" + expression + '\'' + '}'; } /** @@ -1430,7 +1441,7 @@ private static int getLastDayOfMonth(int monthNum, int year) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.value(toString()); + return builder.value(expression); } private static class ValueSet { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/LocalDateTimeLegacyWrapper.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/LocalDateTimeLegacyWrapper.java
new file mode 100644
index 0000000000000..e540acc8042eb
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/LocalDateTimeLegacyWrapper.java
@@ -0,0 +1,130 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.core.scheduler;
+
+import java.time.LocalDateTime;
+import java.time.chrono.ChronoLocalDateTime;
+
+/**
+ * This class is designed to wrap the LocalDateTime class in order to make it behave, in terms of mutation, like a legacy Calendar class.
+ * This is to provide compatibility with the existing Cron next runtime calculation algorithm which relies on certain quirks of the Calendar
+ * such as days of the week being numbered starting on Sunday==1 and being able to set the current hour to 24 and have it roll over to
+ * midnight the next day.
+ */
+public class LocalDateTimeLegacyWrapper {
+
+    private LocalDateTime ldt;
+
+    public LocalDateTimeLegacyWrapper(LocalDateTime ldt) {
+        this.ldt = ldt;
+    }
+
+    public int getYear() {
+        return ldt.getYear();
+    }
+
+    public int getDayOfMonth() {
+        return ldt.getDayOfMonth();
+    }
+
+    public int getHour() {
+        return ldt.getHour();
+    }
+
+    public int getMinute() {
+        return ldt.getMinute();
+    }
+
+    public int getSecond() {
+        return ldt.getSecond();
+    }
+
+    public int getDayOfWeek() {
+        return (ldt.getDayOfWeek().getValue() % 7) + 1;
+    }
+
+    public int getMonth() {
+        return ldt.getMonthValue() - 1;
+    }
+
+    public void setYear(int year) {
+        ldt = ldt.withYear(year);
+    }
+
+    public void setDayOfMonth(int dayOfMonth) {
+        var lengthOfMonth = ldt.getMonth().length(ldt.toLocalDate().isLeapYear());
+        if (dayOfMonth <= lengthOfMonth) {
+            ldt = ldt.withDayOfMonth(dayOfMonth);
+        } else {
+            var months = dayOfMonth / lengthOfMonth;
+            var day = dayOfMonth % lengthOfMonth;
+            ldt = ldt.plusMonths(months).withDayOfMonth(day);
+        }
+    }
+
+    public void setMonth(int month) {
+        month++; // Months are 0-based in Calendar
+        if (month <= 12) {
+            ldt = ldt.withMonth(month);
+        } else {
+            var years = month / 12;
+            var monthOfYear = month % 12;
+            ldt = ldt.plusYears(years).withMonth(monthOfYear);
+        }
+    }
+
+    public void setHour(int hour) {
+        if (hour < 24) {
+            ldt = ldt.withHour(hour);
+        } else {
+            var days = hour / 24;
+            var hourOfDay = hour % 24;
+            ldt = ldt.plusDays(days).withHour(hourOfDay);
+        }
+    }
+
+    public void setMinute(int minute) {
+        if (minute < 60) {
+            ldt = ldt.withMinute(minute);
+        } else {
+            var hours = minute / 60;
+            var minuteOfHour = minute % 60;
+            ldt = ldt.plusHours(hours).withMinute(minuteOfHour);
+        }
+    }
+
+    public void setSecond(int second) {
+        if (second < 60) {
+            ldt = ldt.withSecond(second);
+        } else {
+            var minutes = second / 60;
+            var secondOfMinute = second % 60;
+            ldt = ldt.plusMinutes(minutes).withSecond(secondOfMinute);
+        }
+    }
+
+    public void plusYears(long years) {
+        ldt = ldt.plusYears(years);
+    }
+
+    public void plusSeconds(long seconds) {
+        ldt = ldt.plusSeconds(seconds);
+    }
+
+    public boolean isAfter(ChronoLocalDateTime<?> other) {
+        return ldt.isAfter(other);
+    }
+
+    public boolean isBefore(ChronoLocalDateTime<?> other) {
+        return ldt.isBefore(other);
+    }
+
+    public LocalDateTime getLocalDateTime() {
+        return ldt;
+    }
+}
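The wrapper above exists so the next-fire-time loop in Cron can keep mutating Calendar-style fields while the arithmetic happens on an immutable LocalDateTime. A short sketch of the two quirks it preserves, using only the methods shown above:

    var cal = new LocalDateTimeLegacyWrapper(LocalDateTime.of(2024, 10, 26, 23, 0));
    cal.setHour(26);              // lenient roll-over, like Calendar: now 2024-10-27T02:00
    int dow = cal.getDayOfWeek(); // Calendar numbering: 1 == Sunday (2024-10-27 is a Sunday), not ISO's Monday == 1

diff --git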
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index 259e66f633bac..cc589b53eaa1a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -484,6 +484,22 @@ static RoleDescriptor kibanaSystem(String name) { // Endpoint heartbeat. Kibana reads from these to determine metering/billing for // endpoints. RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.heartbeat-*").privileges("read", "create_index").build(), + // Security Solution workflows insights. Kibana creates, manages, and uses these + // to provide users with insights on potential configuration improvements + RoleDescriptor.IndicesPrivileges.builder() + .indices(".edr-workflow-insights-*") + .privileges( + "create_index", + "auto_configure", + "manage", + "read", + "write", + "delete", + TransportUpdateSettingsAction.TYPE.name(), + TransportPutMappingAction.TYPE.name(), + RolloverAction.NAME + ) + .build(), // For connectors telemetry. Will be removed once we switched to connectors API RoleDescriptor.IndicesPrivileges.builder().indices(".elastic-connectors*").privileges("read").build() }, null, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index 2380c13e147d5..fc14ec6811014 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -402,67 +402,6 @@ private static Map initializeReservedRoles() { "Grants access necessary for the APM system user to send system-level data (such as monitoring) to Elasticsearch.\n" ) ), - entry( - "apm_user", - new RoleDescriptor( - "apm_user", - null, - new RoleDescriptor.IndicesPrivileges[] { - // Self managed APM Server - // Can be removed in 8.0 - RoleDescriptor.IndicesPrivileges.builder().indices("apm-*").privileges("read", "view_index_metadata").build(), - - // APM Server under fleet (data streams) - RoleDescriptor.IndicesPrivileges.builder().indices("logs-apm.*").privileges("read", "view_index_metadata").build(), - RoleDescriptor.IndicesPrivileges.builder().indices("logs-apm-*").privileges("read", "view_index_metadata").build(), - RoleDescriptor.IndicesPrivileges.builder() - .indices("metrics-apm.*") - .privileges("read", "view_index_metadata") - .build(), - RoleDescriptor.IndicesPrivileges.builder() - .indices("metrics-apm-*") - .privileges("read", "view_index_metadata") - .build(), - RoleDescriptor.IndicesPrivileges.builder() - .indices("traces-apm.*") - .privileges("read", "view_index_metadata") - .build(), - RoleDescriptor.IndicesPrivileges.builder() - .indices("traces-apm-*") - .privileges("read", "view_index_metadata") - .build(), - - // Machine Learning indices. 
Only needed for legacy reasons - // Can be removed in 8.0 - RoleDescriptor.IndicesPrivileges.builder() - .indices(".ml-anomalies*") - .privileges("read", "view_index_metadata") - .build(), - - // Annotations - RoleDescriptor.IndicesPrivileges.builder() - .indices("observability-annotations") - .privileges("read", "view_index_metadata") - .build() }, - new RoleDescriptor.ApplicationResourcePrivileges[] { - RoleDescriptor.ApplicationResourcePrivileges.builder() - .application("kibana-*") - .resources("*") - .privileges("reserved_ml_apm_user") - .build() }, - null, - null, - MetadataUtils.getDeprecatedReservedMetadata( - "This role will be removed in a future major release. Please use editor and viewer roles instead" - ), - null, - null, - null, - null, - "Grants the privileges required for APM users (such as read and view_index_metadata privileges " - + "on the apm-* and .ml-anomalies* indices)." - ) - ), entry( "inference_admin", new RoleDescriptor( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java index 05f4e560b73c1..f160b704e9e12 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java @@ -580,14 +580,16 @@ private void addIndexLifecyclePoliciesIfMissing(ClusterState state) { logger.trace("running in data stream lifecycle only mode. skipping the installation of ILM policies."); return; } - IndexLifecycleMetadata metadata = state.metadata().custom(IndexLifecycleMetadata.TYPE); + final IndexLifecycleMetadata metadata = state.metadata().custom(IndexLifecycleMetadata.TYPE); + final Map policies = metadata != null ? metadata.getPolicies() : Map.of(); + for (LifecyclePolicy policy : getLifecyclePolicies()) { final AtomicBoolean creationCheck = policyCreationsInProgress.computeIfAbsent( policy.getName(), key -> new AtomicBoolean(false) ); if (creationCheck.compareAndSet(false, true)) { - final LifecyclePolicy currentPolicy = metadata != null ? 
metadata.getPolicies().get(policy.getName()) : null; + final LifecyclePolicy currentPolicy = policies.get(policy.getName()); if (Objects.isNull(currentPolicy)) { logger.debug("adding lifecycle policy [{}] for [{}], because it doesn't exist", policy.getName(), getOrigin()); putPolicy(policy, creationCheck); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlTemplateRegistry.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlTemplateRegistry.java index a30236b2fef28..cf0a73963f864 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlTemplateRegistry.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlTemplateRegistry.java @@ -11,15 +11,12 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -44,16 +41,12 @@ */ public abstract class YamlTemplateRegistry extends IndexTemplateRegistry { private static final Logger logger = LogManager.getLogger(YamlTemplateRegistry.class); - // this node feature is a redefinition of {@link DataStreamFeatures#DATA_STREAM_LIFECYCLE} and it's meant to avoid adding a - // dependency to the data-streams module just for this - public static final NodeFeature DATA_STREAM_LIFECYCLE = new NodeFeature("data_stream.lifecycle"); private final int version; private final Map componentTemplates; private final Map composableIndexTemplates; private final List ingestPipelines; private final List lifecyclePolicies; - private final FeatureService featureService; private volatile boolean enabled; public YamlTemplateRegistry( @@ -61,10 +54,9 @@ public YamlTemplateRegistry( ClusterService clusterService, ThreadPool threadPool, Client client, - NamedXContentRegistry xContentRegistry, - FeatureService featureService + NamedXContentRegistry xContentRegistry ) { - this(nodeSettings, clusterService, threadPool, client, xContentRegistry, featureService, ignored -> true); + this(nodeSettings, clusterService, threadPool, client, xContentRegistry, ignored -> true); } @SuppressWarnings({ "unchecked", "this-escape" }) @@ -74,7 +66,6 @@ public YamlTemplateRegistry( ThreadPool threadPool, Client client, NamedXContentRegistry xContentRegistry, - FeatureService featureService, Predicate templateFilter ) { super(nodeSettings, clusterService, threadPool, client, xContentRegistry); @@ -123,7 +114,6 @@ public YamlTemplateRegistry( .filter(templateFilter) .map(this::loadLifecyclePolicy) .collect(Collectors.toList()); - this.featureService = featureService; } catch (IOException e) { throw new ElasticsearchException(e); } @@ -152,13 +142,6 @@ public void close() { clusterService.removeListener(this); } - @Override - protected boolean isClusterReady(ClusterChangedEvent event) { - // Ensure current version of the components are installed only 
after versions that support data stream lifecycle - // due to the use of the feature in all the `@lifecycle` component templates - return featureService.clusterHasFeature(event.state(), DATA_STREAM_LIFECYCLE); - } - @Override protected boolean requiresMasterNode() { return true; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/PivotConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/PivotConfig.java index 47f7fea8dc199..6e78c2e8d3ef3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/PivotConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/PivotConfig.java @@ -184,7 +184,7 @@ public void checkForDeprecations(String id, NamedXContentRegistry namedXContentR onDeprecation.accept( // max_page_search_size got deprecated in 7.8, still accepted for 8.x, to be removed in 9.x new DeprecationIssue( - Level.WARNING, + Level.CRITICAL, "Transform [" + id + "] uses the deprecated setting [max_page_search_size]", TransformDeprecations.MAX_PAGE_SEARCH_SIZE_BREAKING_CHANGES_URL, TransformDeprecations.ACTION_MAX_PAGE_SEARCH_SIZE_IS_DEPRECATED, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java index 04fe20901749b..e889d25cd7a96 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java @@ -14,6 +14,7 @@ import java.util.Arrays; import java.util.Map; +import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -228,6 +229,50 @@ public void testLastUsedMomentaryFeature() { assertThat(lastUsed.get(usage), equalTo(200L)); } + public void testLastUsedMomentaryFeatureWithSameNameDifferentFamily() { + LicensedFeature.Momentary featureFamilyA = LicensedFeature.momentary("familyA", "goldFeature", GOLD); + LicensedFeature.Momentary featureFamilyB = LicensedFeature.momentary("familyB", "goldFeature", GOLD); + + AtomicInteger currentTime = new AtomicInteger(100); // non zero start time + XPackLicenseState licenseState = new XPackLicenseState(currentTime::get); + + featureFamilyA.check(licenseState); + featureFamilyB.check(licenseState); + + Map lastUsed = licenseState.getLastUsed(); + assertThat("feature.check tracks usage separately by family", lastUsed, aMapWithSize(2)); + Set actualFeatures = lastUsed.entrySet() + .stream() + .map(it -> new FeatureInfoWithTimestamp(it.getKey().feature().getFamily(), it.getKey().feature().getName(), it.getValue())) + .collect(Collectors.toSet()); + assertThat( + actualFeatures, + containsInAnyOrder( + new FeatureInfoWithTimestamp("familyA", "goldFeature", 100L), + new FeatureInfoWithTimestamp("familyB", "goldFeature", 100L) + ) + ); + + currentTime.set(200); + featureFamilyB.check(licenseState); + + lastUsed = licenseState.getLastUsed(); + assertThat("feature.check tracks usage separately by family", lastUsed, aMapWithSize(2)); + actualFeatures = lastUsed.entrySet() + .stream() + .map(it -> new FeatureInfoWithTimestamp(it.getKey().feature().getFamily(), it.getKey().feature().getName(), it.getValue())) + .collect(Collectors.toSet()); + assertThat( + actualFeatures, + containsInAnyOrder( + new 
FeatureInfoWithTimestamp("familyA", "goldFeature", 100L), + new FeatureInfoWithTimestamp("familyB", "goldFeature", 200L) + ) + ); + } + + private record FeatureInfoWithTimestamp(String family, String featureName, Long timestamp) {} + public void testLastUsedPersistentFeature() { LicensedFeature.Persistent goldFeature = LicensedFeature.persistent("family", "goldFeature", GOLD); AtomicInteger currentTime = new AtomicInteger(100); // non zero start time diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/AbstractTransportSetUpgradeModeActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/AbstractTransportSetUpgradeModeActionTests.java new file mode 100644 index 0000000000000..d780b7fbc32f4 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/AbstractTransportSetUpgradeModeActionTests.java @@ -0,0 +1,219 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.action; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.SimpleBatchedExecutor; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.service.MasterServiceTaskQueue; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class AbstractTransportSetUpgradeModeActionTests extends ESTestCase { + /** + * Creates a TaskQueue that invokes the SimpleBatchedExecutor. + */ + public static ClusterService clusterService() { + AtomicReference> executor = new AtomicReference<>(); + MasterServiceTaskQueue taskQueue = mock(); + ClusterService clusterService = mock(); + doAnswer(ans -> { + executor.set(ans.getArgument(2)); + return taskQueue; + }).when(clusterService).createTaskQueue(any(), any(), any()); + doAnswer(ans -> { + if (executor.get() == null) { + fail("We should create the task queue before we submit tasks to it"); + } else { + executor.get().executeTask(ans.getArgument(1), ClusterState.EMPTY_STATE); + executor.get().taskSucceeded(ans.getArgument(1), null); + } + return null; + }).when(taskQueue).submitTask(any(), any(), any()); + return clusterService; + } + + /** + * Creates a TaskQueue that calls the listener with an error. 
+ */ + public static ClusterService clusterServiceWithError(Exception e) { + MasterServiceTaskQueue taskQueue = mock(); + ClusterService clusterService = mock(); + when(clusterService.createTaskQueue(any(), any(), any())).thenReturn(taskQueue); + doAnswer(ans -> { + ClusterStateTaskListener listener = ans.getArgument(1); + listener.onFailure(e); + return null; + }).when(taskQueue).submitTask(any(), any(), any()); + return clusterService; + } + + /** + * TaskQueue that does nothing. + */ + public static ClusterService clusterServiceThatDoesNothing() { + ClusterService clusterService = mock(); + when(clusterService.createTaskQueue(any(), any(), any())).thenReturn(mock()); + return clusterService; + } + + public void testIdempotent() throws Exception { + // create with update mode set to false + var action = new TestTransportSetUpgradeModeAction(clusterServiceThatDoesNothing(), false); + + // flip to true but do nothing (cluster service mock won't invoke the listener) + action.runWithoutWaiting(true); + // call again + var response = action.run(true); + + assertThat(response.v1(), nullValue()); + assertThat(response.v2(), notNullValue()); + assertThat(response.v2(), instanceOf(ElasticsearchStatusException.class)); + assertThat( + response.v2().getMessage(), + is("Cannot change [upgrade_mode] for feature name [" + action.featureName() + "]. Previous request is still being processed.") + ); + } + + public void testUpdateDoesNotRun() throws Exception { + var shouldNotChange = new AtomicBoolean(true); + var action = new TestTransportSetUpgradeModeAction(true, l -> shouldNotChange.set(false)); + + var response = action.run(true); + + assertThat(response.v1(), is(AcknowledgedResponse.TRUE)); + assertThat(response.v2(), nullValue()); + assertThat(shouldNotChange.get(), is(true)); + } + + public void testErrorReleasesLock() throws Exception { + var action = new TestTransportSetUpgradeModeAction(false, l -> l.onFailure(new IllegalStateException("hello there"))); + + action.run(true); + var response = action.run(true); + assertThat( + "Previous request should have finished processing.", + response.v2().getMessage(), + not(containsString("Previous request is still being processed")) + ); + } + + public void testErrorFromAction() throws Exception { + var expectedException = new IllegalStateException("hello there"); + var action = new TestTransportSetUpgradeModeAction(false, l -> l.onFailure(expectedException)); + + var response = action.run(true); + + assertThat(response.v1(), nullValue()); + assertThat(response.v2(), is(expectedException)); + } + + public void testErrorFromTaskQueue() throws Exception { + var expectedException = new IllegalStateException("hello there"); + var action = new TestTransportSetUpgradeModeAction(clusterServiceWithError(expectedException), false); + + var response = action.run(true); + + assertThat(response.v1(), nullValue()); + assertThat(response.v2(), is(expectedException)); + } + + public void testSuccess() throws Exception { + var action = new TestTransportSetUpgradeModeAction(false, l -> l.onResponse(AcknowledgedResponse.TRUE)); + + var response = action.run(true); + + assertThat(response.v1(), is(AcknowledgedResponse.TRUE)); + assertThat(response.v2(), nullValue()); + } + + private static class TestTransportSetUpgradeModeAction extends AbstractTransportSetUpgradeModeAction { + private final boolean upgradeMode; + private final ClusterState updatedClusterState; + private final Consumer> successFunc; + + TestTransportSetUpgradeModeAction(boolean upgradeMode, Consumer> 
successFunc) { + super("actionName", "taskQueuePrefix", mock(), clusterService(), mock(), mock(), mock()); + this.upgradeMode = upgradeMode; + this.updatedClusterState = ClusterState.EMPTY_STATE; + this.successFunc = successFunc; + } + + TestTransportSetUpgradeModeAction(ClusterService clusterService, boolean upgradeMode) { + super("actionName", "taskQueuePrefix", mock(), clusterService, mock(), mock(), mock()); + this.upgradeMode = upgradeMode; + this.updatedClusterState = ClusterState.EMPTY_STATE; + this.successFunc = listener -> {}; + } + + public void runWithoutWaiting(boolean upgrade) throws Exception { + masterOperation(mock(), new SetUpgradeModeActionRequest(upgrade), ClusterState.EMPTY_STATE, ActionListener.noop()); + } + + public Tuple run(boolean upgrade) throws Exception { + AtomicReference> response = new AtomicReference<>(); + CountDownLatch latch = new CountDownLatch(1); + masterOperation(mock(), new SetUpgradeModeActionRequest(upgrade), ClusterState.EMPTY_STATE, ActionListener.wrap(r -> { + response.set(Tuple.tuple(r, null)); + latch.countDown(); + }, e -> { + response.set(Tuple.tuple(null, e)); + latch.countDown(); + })); + assertTrue("Failed to run TestTransportSetUpgradeModeAction in 10s", latch.await(10, TimeUnit.SECONDS)); + return response.get(); + } + + @Override + protected String featureName() { + return "test-feature-name"; + } + + @Override + protected boolean upgradeMode(ClusterState state) { + return upgradeMode; + } + + @Override + protected ClusterState createUpdatedState(SetUpgradeModeActionRequest request, ClusterState state) { + return updatedClusterState; + } + + @Override + protected void upgradeModeSuccessfullyChanged( + Task task, + SetUpgradeModeActionRequest request, + ClusterState state, + ActionListener listener + ) { + successFunc.accept(listener); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/time/RemainingTimeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/time/RemainingTimeTests.java index 3a948608f6ae3..1e6bc2a51f6e9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/time/RemainingTimeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/time/RemainingTimeTests.java @@ -32,6 +32,11 @@ public void testRemainingTimeMaxValue() { assertThat(remainingTime.get(), Matchers.equalTo(TimeValue.ZERO)); } + public void testMaxTime() { + var remainingTime = RemainingTime.from(Instant::now, TimeValue.MAX_VALUE); + assertThat(remainingTime.get(), Matchers.equalTo(TimeValue.MAX_VALUE)); + } + // always add the first value, which is read when RemainingTime.from is called, then add the test values private Supplier times(Instant... 
instants) { var startTime = Stream.of(Instant.now()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java index ce8cd5ae46ace..bee6351582bc9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.LifecycleExecutionState; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.RepositoriesMetadata; @@ -185,13 +184,12 @@ public void testNameGeneration() { assertThat(generateSnapshotName("name"), startsWith("name-")); assertThat(generateSnapshotName("name").length(), greaterThan("name-".length())); - IndexNameExpressionResolver.ResolverContext resolverContext = new IndexNameExpressionResolver.ResolverContext(time); - assertThat(generateSnapshotName("", resolverContext), startsWith("name-2019.03.15-")); - assertThat(generateSnapshotName("", resolverContext).length(), greaterThan("name-2019.03.15-".length())); + assertThat(generateSnapshotName("", time), startsWith("name-2019.03.15-")); + assertThat(generateSnapshotName("", time).length(), greaterThan("name-2019.03.15-".length())); - assertThat(generateSnapshotName("", resolverContext), startsWith("name-2019.03.01-")); + assertThat(generateSnapshotName("", time), startsWith("name-2019.03.01-")); - assertThat(generateSnapshotName("", resolverContext), startsWith("name-2019-03-15.21:09:00-")); + assertThat(generateSnapshotName("", time), startsWith("name-2019-03-15.21:09:00-")); } public void testNameValidation() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfigTests.java index cf4630899ab53..a91cceec8a167 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfigTests.java @@ -7,10 +7,8 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; -import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.inference.InferenceConfigItemTestCase; @@ -67,13 +65,4 @@ protected TextExpansionConfig doParseInstance(XContentParser parser) throws IOEx protected TextExpansionConfig mutateInstanceForVersion(TextExpansionConfig instance, TransportVersion version) { return instance; } - - public void testBertTokenizationOnly() { - ElasticsearchStatusException e = expectThrows( - ElasticsearchStatusException.class, - () -> new TextExpansionConfig(null, RobertaTokenizationTests.createRandom(), null) - ); - assertEquals(RestStatus.BAD_REQUEST, e.status()); - assertEquals("text expansion models must be 
configured with BERT tokenizer, [roberta] given", e.getMessage()); - } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/scheduler/CronTimezoneTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/scheduler/CronTimezoneTests.java new file mode 100644 index 0000000000000..1e469002457d8 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/scheduler/CronTimezoneTests.java @@ -0,0 +1,231 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.scheduler; + +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.temporal.ChronoUnit; +import java.time.zone.ZoneOffsetTransition; +import java.time.zone.ZoneRules; + +import static java.time.Instant.ofEpochMilli; +import static java.util.TimeZone.getTimeZone; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.not; + +public class CronTimezoneTests extends ESTestCase { + + public void testForFixedOffsetCorrectlyCalculateNextRuntime() { + Cron cron = new Cron("0 0 2 * * ?", ZoneOffset.of("+1")); + long midnightUTC = Instant.parse("2020-01-01T00:00:00Z").toEpochMilli(); + long nextValidTimeAfter = cron.getNextValidTimeAfter(midnightUTC); + assertThat(Instant.ofEpochMilli(nextValidTimeAfter), equalTo(Instant.parse("2020-01-01T01:00:00Z"))); + } + + public void testForLondonFixedDSTTransitionCheckCorrectSchedule() { + ZoneId londonZone = getTimeZone("Europe/London").toZoneId(); + + Cron cron = new Cron("0 0 2 * * ?", londonZone); + ZoneRules londonZoneRules = londonZone.getRules(); + Instant springMidnight = Instant.parse("2020-03-01T00:00:00Z"); + long timeBeforeDST = springMidnight.toEpochMilli(); + + assertThat(cron.getNextValidTimeAfter(timeBeforeDST), equalTo(Instant.parse("2020-03-01T02:00:00Z").toEpochMilli())); + + ZoneOffsetTransition zoneOffsetTransition = londonZoneRules.nextTransition(springMidnight); + + Instant timeAfterDST = zoneOffsetTransition.getDateTimeBefore() + .plusDays(1) + .atZone(ZoneOffset.UTC) + .withHour(0) + .withMinute(0) + .toInstant(); + + assertThat(cron.getNextValidTimeAfter(timeAfterDST.toEpochMilli()), equalTo(Instant.parse("2020-03-30T01:00:00Z").toEpochMilli())); + } + + public void testRandomDSTTransitionCalculateNextTimeCorrectlyRelativeToUTC() { + ZoneId timeZone = generateRandomDSTZone(); + + logger.info("Testing for timezone {}", timeZone); + + ZoneOffsetTransition zoneOffsetTransition = timeZone.getRules().nextTransition(Instant.now()); + + ZonedDateTime midnightBefore = zoneOffsetTransition.getDateTimeBefore().atZone(timeZone).minusDays(2).withHour(0).withMinute(0); + ZonedDateTime midnightAfter = zoneOffsetTransition.getDateTimeAfter().atZone(timeZone).plusDays(2).withHour(0).withMinute(0); + + long epochBefore = midnightBefore.toInstant().toEpochMilli(); + long epochAfter = midnightAfter.toInstant().toEpochMilli(); + + Cron cron = new Cron("0 0 2 * * ?", timeZone); + + long nextScheduleBefore = cron.getNextValidTimeAfter(epochBefore); + long nextScheduleAfter = cron.getNextValidTimeAfter(epochAfter); + + assertThat(nextScheduleBefore - epochBefore, equalTo(2 * 60 * 60 * 
1000L)); // 2 hours + assertThat(nextScheduleAfter - epochAfter, equalTo(2 * 60 * 60 * 1000L)); // 2 hours + + ZonedDateTime utcMidnightBefore = zoneOffsetTransition.getDateTimeBefore() + .atZone(ZoneOffset.UTC) + .minusDays(2) + .withHour(0) + .withMinute(0); + + ZonedDateTime utcMidnightAfter = zoneOffsetTransition.getDateTimeAfter() + .atZone(ZoneOffset.UTC) + .plusDays(2) + .withHour(0) + .withMinute(0); + + long utcEpochBefore = utcMidnightBefore.toInstant().toEpochMilli(); + long utcEpochAfter = utcMidnightAfter.toInstant().toEpochMilli(); + + long nextUtcScheduleBefore = cron.getNextValidTimeAfter(utcEpochBefore); + long nextUtcScheduleAfter = cron.getNextValidTimeAfter(utcEpochAfter); + + assertThat(nextUtcScheduleBefore - utcEpochBefore, not(equalTo(nextUtcScheduleAfter - utcEpochAfter))); + + } + + private ZoneId generateRandomDSTZone() { + ZoneId timeZone; + int i = 0; + boolean found; + do { + timeZone = randomZone(); + found = getTimeZone(timeZone).useDaylightTime(); + i++; + } while (found == false && i <= 500); // Infinite loop prevention + + if (found == false) { + fail("Could not find a timezone with DST"); + } + + logger.debug("Testing for timezone {} after {} iterations", timeZone, i); + return timeZone; + } + + public void testForGMTGapTransitionTriggerTimeIsAsIfTransitionHasntHappenedYet() { + ZoneId london = ZoneId.of("Europe/London"); + Cron cron = new Cron("0 30 1 * * ?", london); // Every day at 1:30 + + Instant beforeTransition = Instant.parse("2025-03-30T00:00:00Z"); + long beforeTransitionEpoch = beforeTransition.toEpochMilli(); + + long nextValidTimeAfter = cron.getNextValidTimeAfter(beforeTransitionEpoch); + assertThat(ofEpochMilli(nextValidTimeAfter), equalTo(Instant.parse("2025-03-30T01:30:00Z"))); + } + + public void testForGMTOverlapTransitionTriggerSkipSecondExecution() { + ZoneId london = ZoneId.of("Europe/London"); + Cron cron = new Cron("0 30 1 * * ?", london); // Every day at 01:30 + + Instant beforeTransition = Instant.parse("2024-10-27T00:00:00Z"); + long beforeTransitionEpoch = beforeTransition.toEpochMilli(); + + long firstValidTimeAfter = cron.getNextValidTimeAfter(beforeTransitionEpoch); + assertThat(ofEpochMilli(firstValidTimeAfter), equalTo(Instant.parse("2024-10-27T00:30:00Z"))); + + long nextValidTimeAfter = cron.getNextValidTimeAfter(firstValidTimeAfter); + assertThat(ofEpochMilli(nextValidTimeAfter), equalTo(Instant.parse("2024-10-28T01:30:00Z"))); + } + + // This test checks that once per minute crons will be unaffected by a DST transition + public void testDiscontinuityResolutionForNonHourCronInRandomTimezone() { + var timezone = generateRandomDSTZone(); + + var cron = new Cron("0 * * * * ?", timezone); // Once per minute + + Instant referenceTime = randomInstantBetween(Instant.now(), Instant.now().plus(1826, ChronoUnit.DAYS)); // ~5 years + ZoneOffsetTransition transition1 = timezone.getRules().nextTransition(referenceTime); + + // Currently there are no known timezones with DST transitions shorter than 10 minutes but this guards against future changes + if (Math.abs(transition1.getOffsetBefore().getTotalSeconds() - transition1.getOffsetAfter().getTotalSeconds()) < 600) { + fail("Transition is not long enough to test"); + } + + testNonHourCronTransition(transition1, cron); + + var transition2 = timezone.getRules().nextTransition(transition1.getInstant().plus(1, ChronoUnit.DAYS)); + + testNonHourCronTransition(transition2, cron); + + } + + private static void testNonHourCronTransition(ZoneOffsetTransition transition, Cron cron) { + 
+        Instant insideTransition;
+        if (transition.isGap()) {
+            insideTransition = transition.getInstant().plus(10, ChronoUnit.MINUTES);
+            Instant nextTrigger = ofEpochMilli(cron.getNextValidTimeAfter(insideTransition.toEpochMilli()));
+            assertThat(nextTrigger, equalTo(insideTransition.plus(1, ChronoUnit.MINUTES)));
+        } else {
+            insideTransition = transition.getInstant().minus(10, ChronoUnit.MINUTES);
+            Instant nextTrigger = ofEpochMilli(cron.getNextValidTimeAfter(insideTransition.toEpochMilli()));
+            assertThat(nextTrigger, equalTo(insideTransition.plus(1, ChronoUnit.MINUTES)));
+
+            insideTransition = insideTransition.plus(transition.getDuration());
+            nextTrigger = ofEpochMilli(cron.getNextValidTimeAfter(insideTransition.toEpochMilli()));
+            assertThat(nextTrigger, equalTo(insideTransition.plus(1, ChronoUnit.MINUTES)));
+        }
+    }
+
+    // This test checks that once per day crons will behave correctly during a DST transition
+    public void testDiscontinuityResolutionForCronInRandomTimezone() {
+        var timezone = generateRandomDSTZone();
+
+        Instant referenceTime = randomInstantBetween(Instant.now(), Instant.now().plus(1826, ChronoUnit.DAYS)); // ~5 years
+        ZoneOffsetTransition transition1 = timezone.getRules().nextTransition(referenceTime);
+
+        // Currently there are no known timezones with DST transitions shorter than 10 minutes but this guards against future changes
+        if (Math.abs(transition1.getOffsetBefore().getTotalSeconds() - transition1.getOffsetAfter().getTotalSeconds()) < 600) {
+            fail("Transition is not long enough to test");
+        }
+
+        testHourCronTransition(transition1, timezone);
+
+        var transition2 = timezone.getRules().nextTransition(transition1.getInstant().plus(1, ChronoUnit.DAYS));
+
+        testHourCronTransition(transition2, timezone);
+    }
+
+    private static void testHourCronTransition(ZoneOffsetTransition transition, ZoneId timezone) {
+        if (transition.isGap()) {
+            LocalDateTime targetTime = transition.getDateTimeBefore().plusMinutes(10);
+
+            var cron = new Cron("0 " + targetTime.getMinute() + " " + targetTime.getHour() + " * * ?", timezone);
+
+            long nextTrigger = cron.getNextValidTimeAfter(transition.getInstant().minus(10, ChronoUnit.MINUTES).toEpochMilli());
+
+            assertThat(ofEpochMilli(nextTrigger), equalTo(transition.getInstant().plus(10, ChronoUnit.MINUTES)));
+        } else {
+            LocalDateTime targetTime = transition.getDateTimeAfter().plusMinutes(10);
+            var cron = new Cron("0 " + targetTime.getMinute() + " " + targetTime.getHour() + " * * ?", timezone);
+
+            long transitionLength = Math.abs(transition.getDuration().toSeconds());
+            long firstTrigger = cron.getNextValidTimeAfter(
+                transition.getInstant().minusSeconds(transitionLength).minus(10, ChronoUnit.MINUTES).toEpochMilli()
+            );
+
+            assertThat(
+                ofEpochMilli(firstTrigger),
+                equalTo(transition.getInstant().minusSeconds(transitionLength).plus(10, ChronoUnit.MINUTES))
+            );
+
+            var repeatTrigger = cron.getNextValidTimeAfter(firstTrigger + (1000 * 60L)); // 1 minute
+
+            assertThat(repeatTrigger - firstTrigger, Matchers.greaterThan(24 * 60 * 60 * 1000L)); // 24 hours
+        }
+    }
+
+}
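These tests drive everything off java.time's zone-rules API. For reference, a self-contained sketch of how a DST discontinuity is located and classified (any zone id with DST works; fixed-offset zones yield no further transition, hence the null check):

    ZoneOffsetTransition t = ZoneId.of("Europe/London").getRules().nextTransition(Instant.now());
    if (t != null) {
        // A gap means local clocks jump forward (spring); an overlap means they fall back (autumn).
        String kind = t.isGap() ? "gap (clocks jump forward)" : "overlap (clocks fall back)";
        System.out.println(kind + " at " + t.getInstant() + ", lasting " + t.getDuration().abs());
    }

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java
index 218876c7d40e8..3ca6777512420 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java
+++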
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java @@ -31,9 +31,12 @@ import org.elasticsearch.xpack.core.XPackClientPlugin; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ApplicationResourcePrivileges; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; +import org.elasticsearch.xpack.core.security.authz.restriction.Workflow; +import org.elasticsearch.xpack.core.security.authz.restriction.WorkflowResolver; import org.hamcrest.Matchers; import java.io.IOException; @@ -47,7 +50,6 @@ import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptor.SECURITY_ROLE_DESCRIPTION; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptor.WORKFLOWS_RESTRICTION_VERSION; -import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTestHelper.randomIndicesPrivileges; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTestHelper.randomIndicesPrivilegesBuilder; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTestHelper.randomRemoteClusterPermissions; import static org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions.ROLE_REMOTE_CLUSTER_PRIVS; @@ -1338,38 +1340,191 @@ public void testIsEmpty() { } } - public void testHasPrivilegesOtherThanIndex() { + public void testHasUnsupportedPrivilegesInsideAPIKeyConnectedRemoteCluster() { + // any index and some cluster privileges are allowed assertThat( new RoleDescriptor( "name", + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), // all of these are allowed + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("idx").privileges("foo").build() }, + null, + null, + null, + null, + null, + null, + null, + null, + null + ).hasUnsupportedPrivilegesInsideAPIKeyConnectedRemoteCluster(), + is(false) + ); + // any index and some cluster privileges are allowed + assertThat( + new RoleDescriptor( + "name", + new String[] { "manage_security" }, // unlikely we will ever support allowing manage security across clusters + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("idx").privileges("foo").build() }, + null, + null, + null, + null, + null, + null, + null, + null, + null + ).hasUnsupportedPrivilegesInsideAPIKeyConnectedRemoteCluster(), + is(true) + ); + + // application privileges are not allowed + assertThat( + new RoleDescriptor( + "name", + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("idx").privileges("foo").build() }, + new ApplicationResourcePrivileges[] { + ApplicationResourcePrivileges.builder().application("app").privileges("foo").resources("res").build() }, + null, + null, + null, + null, + null, + null, + null, + null + ).hasUnsupportedPrivilegesInsideAPIKeyConnectedRemoteCluster(), + is(true) + ); + + // configurable cluster privileges are not allowed + assertThat( + 
new RoleDescriptor( + "name", + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("idx").privileges("foo").build() }, + null, + new ConfigurableClusterPrivilege[] { + new ConfigurableClusterPrivileges.ManageApplicationPrivileges(Collections.singleton("foo")) }, + null, + null, + null, + null, + null, + null, + null + ).hasUnsupportedPrivilegesInsideAPIKeyConnectedRemoteCluster(), + is(true) + ); + + // run as is not allowed + assertThat( + new RoleDescriptor( + "name", + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("idx").privileges("foo").build() }, + null, + null, + new String[] { "foo" }, + null, + null, + null, + null, + null, + null + ).hasUnsupportedPrivilegesInsideAPIKeyConnectedRemoteCluster(), + is(true) + ); + + // workflows restriction is not allowed + assertThat( + new RoleDescriptor( + "name", + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("idx").privileges("foo").build() }, null, - randomBoolean() ? null : randomIndicesPrivileges(1, 5), null, null, null, null, null, null, + new RoleDescriptor.Restriction(WorkflowResolver.allWorkflows().stream().map(Workflow::name).toArray(String[]::new)), + null + ).hasUnsupportedPrivilegesInsideAPIKeyConnectedRemoteCluster(), + is(true) + ); + // remote indices privileges are not allowed + assertThat( + new RoleDescriptor( + "name", + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("idx").privileges("foo").build() }, + null, + null, + null, + null, + null, + new RoleDescriptor.RemoteIndicesPrivileges[] { + RoleDescriptor.RemoteIndicesPrivileges.builder("rmt").indices("idx").privileges("foo").build() }, null, null, null ).hasUnsupportedPrivilegesInsideAPIKeyConnectedRemoteCluster(), + is(true) + ); + // remote cluster privileges are not allowed + assertThat( + new RoleDescriptor( + "name", + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("idx").privileges("foo").build() }, + null, + null, + null, + null, + null, + null, + new RemoteClusterPermissions().addGroup( + new RemoteClusterPermissionGroup( + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new String[] { "rmt" } + ) + ), + null, + null + ).hasUnsupportedPrivilegesInsideAPIKeyConnectedRemoteCluster(), + is(true) + ); + + // metadata, transient metadata and description are allowed + assertThat( + new RoleDescriptor( + "name", + RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("idx").privileges("foo").build() }, + null, + null, + null, + Collections.singletonMap("foo", "bar"), + Collections.singletonMap("foo", "bar"), + null, + null, + null, + "description" + ).hasUnsupportedPrivilegesInsideAPIKeyConnectedRemoteCluster(), is(false) ); - final RoleDescriptor roleDescriptor = RoleDescriptorTestHelper.builder() - .allowReservedMetadata(true) - 
.allowRemoteIndices(true) - .allowRestriction(true) - .allowDescription(true) - .allowRemoteClusters(true) - .build(); - final boolean expected = roleDescriptor.hasClusterPrivileges() - || roleDescriptor.hasConfigurableClusterPrivileges() - || roleDescriptor.hasApplicationPrivileges() - || roleDescriptor.hasRunAs() - || roleDescriptor.hasRemoteIndicesPrivileges() - || roleDescriptor.hasWorkflowsRestriction(); - assertThat(roleDescriptor.hasUnsupportedPrivilegesInsideAPIKeyConnectedRemoteCluster(), equalTo(expected)); } private static void resetFieldPermssionsCache() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index fb4d822b7655c..17579fd6368ce 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -1106,6 +1106,28 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); }); + // index for Security Solution workflow insights + Arrays.asList(".edr-workflow-insights-" + randomAlphaOfLength(randomIntBetween(0, 13))).forEach((index) -> { + final IndexAbstraction indexAbstraction = mockIndexAbstraction(index); + assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:foo").test(indexAbstraction), is(false)); + assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:bar").test(indexAbstraction), is(false)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(indexAbstraction), is(true)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(indexAbstraction), is(true)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), is(true)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(true)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(indexAbstraction), is(true)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(indexAbstraction), is(true)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(READ_CROSS_CLUSTER_NAME).test(indexAbstraction), is(false)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(indexAbstraction), + is(true) + ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportPutMappingAction.TYPE.name()).test(indexAbstraction), is(true)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(RolloverAction.NAME).test(indexAbstraction), is(true)); + }); + // Data telemetry reads mappings, metadata and stats of indices Arrays.asList(randomAlphaOfLengthBetween(8, 24), "packetbeat-*").forEach((index) -> { logger.info("index name [{}]", index); @@ -3058,89 +3080,6 @@ public void testAPMSystemRole() { assertNoAccessAllowed(APMSystemRole, XPackPlugin.ASYNC_RESULTS_INDEX + 
randomAlphaOfLengthBetween(0, 2)); } - public void testAPMUserRole() { - final TransportRequest request = mock(TransportRequest.class); - final Authentication authentication = AuthenticationTestHelper.builder().build(); - - final RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("apm_user"); - assertNotNull(roleDescriptor); - assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); - - final String allowedApplicationActionPattern = "example/custom/action/*"; - final String kibanaApplicationWithRandomIndex = "kibana-" + randomFrom(randomAlphaOfLengthBetween(8, 24), ".kibana"); - Role role = Role.buildFromRoleDescriptor( - roleDescriptor, - new FieldPermissionsCache(Settings.EMPTY), - RESTRICTED_INDICES, - List.of( - new ApplicationPrivilegeDescriptor( - kibanaApplicationWithRandomIndex, - "reserved_ml_apm_user", - Set.of(allowedApplicationActionPattern), - Map.of() - ) - ) - ); - - assertThat(role.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); - assertThat(role.runAs().check(randomAlphaOfLengthBetween(1, 12)), is(false)); - - assertNoAccessAllowed(role, "foo"); - assertNoAccessAllowed(role, "foo-apm"); - assertNoAccessAllowed(role, "foo-logs-apm.bar"); - assertNoAccessAllowed(role, "foo-logs-apm-bar"); - assertNoAccessAllowed(role, "foo-traces-apm.bar"); - assertNoAccessAllowed(role, "foo-traces-apm-bar"); - assertNoAccessAllowed(role, "foo-metrics-apm.bar"); - assertNoAccessAllowed(role, "foo-metrics-apm-bar"); - - assertOnlyReadAllowed(role, "logs-apm." + randomIntBetween(0, 5)); - assertOnlyReadAllowed(role, "logs-apm-" + randomIntBetween(0, 5)); - assertOnlyReadAllowed(role, "traces-apm." + randomIntBetween(0, 5)); - assertOnlyReadAllowed(role, "traces-apm-" + randomIntBetween(0, 5)); - assertOnlyReadAllowed(role, "metrics-apm." 
+ randomIntBetween(0, 5)); - assertOnlyReadAllowed(role, "metrics-apm-" + randomIntBetween(0, 5)); - assertOnlyReadAllowed(role, "apm-" + randomIntBetween(0, 5)); - assertOnlyReadAllowed(role, AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT); - - assertOnlyReadAllowed(role, "observability-annotations"); - - assertThat( - role.application().grants(ApplicationPrivilegeTests.createPrivilege(kibanaApplicationWithRandomIndex, "app-foo", "foo"), "*"), - is(false) - ); - assertThat( - role.application() - .grants( - ApplicationPrivilegeTests.createPrivilege( - kibanaApplicationWithRandomIndex, - "app-reserved_ml_apm_user", - allowedApplicationActionPattern - ), - "*" - ), - is(true) - ); - - final String otherApplication = "logstash-" + randomAlphaOfLengthBetween(8, 24); - assertThat( - role.application().grants(ApplicationPrivilegeTests.createPrivilege(otherApplication, "app-foo", "foo"), "*"), - is(false) - ); - assertThat( - role.application() - .grants( - ApplicationPrivilegeTests.createPrivilege( - otherApplication, - "app-reserved_ml_apm_user", - allowedApplicationActionPattern - ), - "*" - ), - is(false) - ); - } - public void testMachineLearningAdminRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManagerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManagerTests.java index bbf80279b0b2a..60db8b6522518 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManagerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManagerTests.java @@ -218,7 +218,7 @@ public void testThatDelegateTrustManagerIsRespected() throws Exception { if (cert.endsWith("/ca")) { assertTrusted(trustManager, cert); } else { - assertNotValid(trustManager, cert, inFipsJvm() ? "Unable to find certificate chain." : "PKIX path building failed.*"); + assertNotValid(trustManager, cert, inFipsJvm() ? 
"Unable to construct a valid chain" : "PKIX path building failed.*"); } } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistryTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistryTests.java index e396712cbc360..356fac4539137 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistryTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistryTests.java @@ -726,7 +726,7 @@ private static void assertPutPipelineAction( putRequest.getSource(), putRequest.getXContentType() ); - List processors = (List) pipelineConfiguration.getConfigAsMap().get("processors"); + List processors = (List) pipelineConfiguration.getConfig().get("processors"); assertThat(processors, hasSize(1)); Map setProcessor = (Map) ((Map) processors.get(0)).get("set"); assertNotNull(setProcessor.get("field")); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigTests.java index f1c2de11496bf..8cfecc432c661 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigTests.java @@ -930,7 +930,7 @@ public void testCheckForDeprecations() { equalTo( Collections.singletonList( new DeprecationIssue( - Level.WARNING, + Level.CRITICAL, "Transform [" + id + "] uses the deprecated setting [max_page_search_size]", TransformDeprecations.MAX_PAGE_SEARCH_SIZE_BREAKING_CHANGES_URL, TransformDeprecations.ACTION_MAX_PAGE_SEARCH_SIZE_IS_DEPRECATED, @@ -952,7 +952,7 @@ public void testCheckForDeprecations() { equalTo( List.of( new DeprecationIssue( - Level.WARNING, + Level.CRITICAL, "Transform [" + id + "] uses the deprecated setting [max_page_search_size]", TransformDeprecations.MAX_PAGE_SEARCH_SIZE_BREAKING_CHANGES_URL, TransformDeprecations.ACTION_MAX_PAGE_SEARCH_SIZE_IS_DEPRECATED, @@ -982,7 +982,7 @@ public void testCheckForDeprecations() { null ), new DeprecationIssue( - Level.WARNING, + Level.CRITICAL, "Transform [" + id + "] uses the deprecated setting [max_page_search_size]", TransformDeprecations.MAX_PAGE_SEARCH_SIZE_BREAKING_CHANGES_URL, TransformDeprecations.ACTION_MAX_PAGE_SEARCH_SIZE_IS_DEPRECATED, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json index 8f50ebd334f16..c7424571dd678 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json @@ -9,19 +9,22 @@ "sort": { "field": [ "profiling.project.id", + "k8s.namespace.name", "orchestrator.resource.name", "host.name", "container.name", "process.thread.name" ] + }, + "mapping": { + "source": { + "mode": "synthetic" + } } }, "codec": "best_compression" }, "mappings": { - "_source": { - "mode": "synthetic" - }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, "index-version": ${xpack.profiling.index.events.version}, @@ -80,6 +83,9 @@ }, "container.id": { "type": "keyword" + }, + 
"k8s.namespace.name": { + "type": "keyword" } } } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json index f1e5e01d50c16..ac72a03202646 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json @@ -5,13 +5,15 @@ "auto_expand_replicas": "0-1", "refresh_interval": "10s", "hidden": true, - "lifecycle.rollover_alias": "profiling-executables" + "lifecycle.rollover_alias": "profiling-executables", + "mapping": { + "source": { + "mode": "synthetic" + } + } } }, "mappings": { - "_source": { - "mode": "synthetic" - }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, "index-version": ${xpack.profiling.index.executables.version}, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json index 35f53a36b2d0b..bb893a07c70a1 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json @@ -10,14 +10,16 @@ "@timestamp", "host.id" ] + }, + "mapping": { + "source": { + "mode": "synthetic" + } } }, "codec": "best_compression" }, "mappings": { - "_source": { - "mode": "synthetic" - }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, "index-version": ${xpack.profiling.index.metrics.version}, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json index 6c96fb21673ae..1170e3a32d8e2 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json @@ -11,13 +11,15 @@ "field": [ "Stacktrace.frame.ids" ] + }, + "mapping": { + "source": { + "mode": "synthetic" + } } } }, "mappings": { - "_source": { - "mode": "synthetic" - }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, "index-version": ${xpack.profiling.index.stacktraces.version}, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json index 71c4d15989b7a..d5d24a22fc58e 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json @@ -7,13 +7,15 @@ "index": { "auto_expand_replicas": "0-1", "refresh_interval": "10s", - "hidden": true + "hidden": true, + "mapping": { + "source": { + "mode": "synthetic" + } + } } }, "mappings": { - "_source": { - "mode": "synthetic" - }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, "index-version": 
${xpack.profiling.index.sq.executables.version}, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json index 20849bfe8f27d..b56b4b2874743 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json @@ -7,13 +7,15 @@ "index": { "auto_expand_replicas": "0-1", "refresh_interval": "10s", - "hidden": true + "hidden": true, + "mapping": { + "source": { + "mode": "synthetic" + } + } } }, "mappings": { - "_source": { - "mode": "synthetic" - }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, "index-version": ${xpack.profiling.index.sq.leafframes.version}, diff --git a/x-pack/plugin/deprecation/qa/early-deprecation-rest/build.gradle b/x-pack/plugin/deprecation/qa/early-deprecation-rest/build.gradle index 2d8859bdcea3d..7e61533c818ec 100644 --- a/x-pack/plugin/deprecation/qa/early-deprecation-rest/build.gradle +++ b/x-pack/plugin/deprecation/qa/early-deprecation-rest/build.gradle @@ -1,5 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + import org.elasticsearch.gradle.util.GradleUtils -import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.base-internal-es-plugin' apply plugin: 'elasticsearch.legacy-java-rest-test' @@ -37,6 +43,6 @@ testClusters.configureEach { // Test clusters run with security disabled tasks.named("javaRestTest") { - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) } diff --git a/x-pack/plugin/deprecation/qa/rest/build.gradle b/x-pack/plugin/deprecation/qa/rest/build.gradle index 70c0cadbce375..45b543d910a75 100644 --- a/x-pack/plugin/deprecation/qa/rest/build.gradle +++ b/x-pack/plugin/deprecation/qa/rest/build.gradle @@ -1,5 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + import org.elasticsearch.gradle.util.GradleUtils -import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.base-internal-es-plugin' apply plugin: 'elasticsearch.legacy-java-rest-test' @@ -34,5 +40,5 @@ testClusters.configureEach { // Test clusters run with security disabled tasks.named("javaRestTest") { - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java index d13f3cda2a82c..f9b2cc5afe3a5 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java @@ -96,7 +96,8 @@ private DeprecationChecks() {} IndexDeprecationChecks::checkIndexDataPath, IndexDeprecationChecks::storeTypeSettingCheck, IndexDeprecationChecks::frozenIndexSettingCheck, - IndexDeprecationChecks::deprecatedCamelCasePattern + IndexDeprecationChecks::deprecatedCamelCasePattern, + IndexDeprecationChecks::checkSourceModeInMapping ); static List> DATA_STREAM_CHECKS = List.of( diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java index 8144d960df2e8..aaf58a44a6565 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java @@ -16,6 +16,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.engine.frozen.FrozenEngine; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; import java.util.ArrayList; @@ -201,6 +202,31 @@ static List findInPropertiesRecursively( return issues; } + static DeprecationIssue checkSourceModeInMapping(IndexMetadata indexMetadata, ClusterState clusterState) { + if (SourceFieldMapper.onOrAfterDeprecateModeVersion(indexMetadata.getCreationVersion())) { + boolean[] useSourceMode = { false }; + fieldLevelMappingIssue(indexMetadata, ((mappingMetadata, sourceAsMap) -> { + Object source = sourceAsMap.get("_source"); + if (source instanceof Map sourceMap) { + if (sourceMap.containsKey("mode")) { + useSourceMode[0] = true; + } + } + })); + if (useSourceMode[0]) { + return new DeprecationIssue( + DeprecationIssue.Level.CRITICAL, + SourceFieldMapper.DEPRECATION_WARNING, + "https://github.com/elastic/elasticsearch/pull/117172", + SourceFieldMapper.DEPRECATION_WARNING, + false, + null + ); + } + } + return null; + } + static DeprecationIssue deprecatedCamelCasePattern(IndexMetadata indexMetadata, ClusterState clusterState) { List fields = new ArrayList<>(); fieldLevelMappingIssue( diff --git a/x-pack/plugin/downsample/qa/mixed-cluster/build.gradle b/x-pack/plugin/downsample/qa/mixed-cluster/build.gradle index 6b1c7e42c0fde..236c851febd6c 100644 --- a/x-pack/plugin/downsample/qa/mixed-cluster/build.gradle +++ b/x-pack/plugin/downsample/qa/mixed-cluster/build.gradle @@ -6,7 +6,6 @@ */ import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.internal.info.BuildParams import 
org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-yaml-rest-test' @@ -29,7 +28,7 @@ def supportedVersion = bwcVersion -> { return bwcVersion.onOrAfter("8.10.0") && bwcVersion != VersionProperties.elasticsearchVersion } -BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> def yamlRestTest = tasks.register("v${bwcVersion}#yamlRestTest", StandaloneRestIntegTestTask) { usesDefaultDistribution() diff --git a/x-pack/plugin/downsample/qa/rest/build.gradle b/x-pack/plugin/downsample/qa/rest/build.gradle index ba5ac7b0c7317..54e07558464d1 100644 --- a/x-pack/plugin/downsample/qa/rest/build.gradle +++ b/x-pack/plugin/downsample/qa/rest/build.gradle @@ -5,8 +5,6 @@ * 2.0. */ -import org.elasticsearch.gradle.internal.info.BuildParams - apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-test-artifact' @@ -32,7 +30,7 @@ tasks.named('yamlRestTest') { tasks.named('yamlRestCompatTest') { usesDefaultDistribution() } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("yamlRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/downsample/qa/with-security/build.gradle b/x-pack/plugin/downsample/qa/with-security/build.gradle index 5eed735950187..29980b95d0291 100644 --- a/x-pack/plugin/downsample/qa/with-security/build.gradle +++ b/x-pack/plugin/downsample/qa/with-security/build.gradle @@ -6,7 +6,6 @@ */ import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' @@ -28,7 +27,7 @@ testClusters.configureEach { user username: 'elastic_admin', password: 'admin-password' } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("yamlRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/enrich/qa/rest-with-advanced-security/build.gradle b/x-pack/plugin/enrich/qa/rest-with-advanced-security/build.gradle index 2e649e718b081..6a1f820e36205 100644 --- a/x-pack/plugin/enrich/qa/rest-with-advanced-security/build.gradle +++ b/x-pack/plugin/enrich/qa/rest-with-advanced-security/build.gradle @@ -1,6 +1,11 @@ -apply plugin: 'elasticsearch.legacy-java-rest-test' +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ -import org.elasticsearch.gradle.internal.info.BuildParams +apply plugin: 'elasticsearch.legacy-java-rest-test' dependencies { javaRestTestImplementation project(path: xpackModule('core')) diff --git a/x-pack/plugin/enrich/qa/rest-with-security/build.gradle b/x-pack/plugin/enrich/qa/rest-with-security/build.gradle index 69fec4ad32c74..17a213a6e7f0d 100644 --- a/x-pack/plugin/enrich/qa/rest-with-security/build.gradle +++ b/x-pack/plugin/enrich/qa/rest-with-security/build.gradle @@ -1,12 +1,17 @@ -apply plugin: 'elasticsearch.legacy-java-rest-test' +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ -import org.elasticsearch.gradle.internal.info.BuildParams +apply plugin: 'elasticsearch.legacy-java-rest-test' dependencies { javaRestTestImplementation project(path: xpackModule('core')) javaRestTestImplementation project(path: xpackModule('enrich:qa:common')) } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("javaRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/enrich/qa/rest/build.gradle b/x-pack/plugin/enrich/qa/rest/build.gradle index f96eff5f933c4..cf3c687004cbb 100644 --- a/x-pack/plugin/enrich/qa/rest/build.gradle +++ b/x-pack/plugin/enrich/qa/rest/build.gradle @@ -1,10 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + apply plugin: 'elasticsearch.legacy-java-rest-test' apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' - import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.internal.info.BuildParams restResources { restApi { @@ -19,7 +24,7 @@ dependencies { javaRestTestImplementation project(path: xpackModule('enrich:qa:common')) } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("javaRestTest").configure{enabled = false } tasks.named("yamlRestTest").configure{enabled = false } diff --git a/x-pack/plugin/enrich/src/internalClusterTest/java/org/elasticsearch/xpack/enrich/EnrichProcessorIT.java b/x-pack/plugin/enrich/src/internalClusterTest/java/org/elasticsearch/xpack/enrich/EnrichProcessorIT.java index d646aed11d7d9..5fc16034465d4 100644 --- a/x-pack/plugin/enrich/src/internalClusterTest/java/org/elasticsearch/xpack/enrich/EnrichProcessorIT.java +++ b/x-pack/plugin/enrich/src/internalClusterTest/java/org/elasticsearch/xpack/enrich/EnrichProcessorIT.java @@ -9,9 +9,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.ingest.SimulateDocumentBaseResult; -import org.elasticsearch.action.ingest.SimulatePipelineRequest; import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.ingest.common.IngestCommonPlugin; import org.elasticsearch.plugins.Plugin; @@ -27,6 +25,7 @@ import java.util.Collection; import java.util.List; +import static org.elasticsearch.ingest.IngestPipelineTestUtils.jsonSimulatePipelineRequest; import static org.elasticsearch.xpack.enrich.AbstractEnrichTestCase.createSourceIndices; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -90,7 +89,7 @@ public void testEnrichCacheValuesCannotBeCorrupted() { var executePolicyRequest = new ExecuteEnrichPolicyAction.Request(TEST_REQUEST_TIMEOUT, policyName); client().execute(ExecuteEnrichPolicyAction.INSTANCE, executePolicyRequest).actionGet(); - var simulatePipelineRequest = new SimulatePipelineRequest(new BytesArray(""" + var simulatePipelineRequest = jsonSimulatePipelineRequest(""" { "pipeline": { "processors": [ @@ 
-119,7 +118,7 @@ public void testEnrichCacheValuesCannotBeCorrupted() { } ] } - """), XContentType.JSON); + """); var response = clusterAdmin().simulatePipeline(simulatePipelineRequest).actionGet(); var result = (SimulateDocumentBaseResult) response.getResults().get(0); assertThat(result.getFailure(), nullValue()); @@ -132,7 +131,7 @@ public void testEnrichCacheValuesCannotBeCorrupted() { assertThat(statsResponse.getCacheStats().get(0).misses(), equalTo(1L)); assertThat(statsResponse.getCacheStats().get(0).hits(), equalTo(0L)); - simulatePipelineRequest = new SimulatePipelineRequest(new BytesArray(""" + simulatePipelineRequest = jsonSimulatePipelineRequest(""" { "pipeline": { "processors": [ @@ -155,7 +154,7 @@ public void testEnrichCacheValuesCannotBeCorrupted() { } ] } - """), XContentType.JSON); + """); response = clusterAdmin().simulatePipeline(simulatePipelineRequest).actionGet(); result = (SimulateDocumentBaseResult) response.getResults().get(0); assertThat(result.getFailure(), nullValue()); diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyReindexPipeline.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyReindexPipeline.java index 7cddd7e037742..512955a5fe2eb 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyReindexPipeline.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyReindexPipeline.java @@ -56,7 +56,7 @@ static boolean exists(ClusterState clusterState) { if (ingestMetadata != null) { final PipelineConfiguration pipeline = ingestMetadata.getPipelines().get(pipelineName()); if (pipeline != null) { - Object version = pipeline.getConfigAsMap().get("version"); + Object version = pipeline.getConfig().get("version"); return version instanceof Number number && number.intValue() >= ENRICH_PIPELINE_LAST_UPDATED_VERSION; } } diff --git a/x-pack/plugin/ent-search/qa/full-cluster-restart/build.gradle b/x-pack/plugin/ent-search/qa/full-cluster-restart/build.gradle index e84adf0c0325d..1e1973a118074 100644 --- a/x-pack/plugin/ent-search/qa/full-cluster-restart/build.gradle +++ b/x-pack/plugin/ent-search/qa/full-cluster-restart/build.gradle @@ -5,7 +5,6 @@ * 2.0. 
*/ -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-java-rest-test' @@ -17,7 +16,7 @@ dependencies { javaRestTestImplementation(testArtifact(project(":qa:full-cluster-restart"), "javaRestTest")) } -BuildParams.bwcVersions.withWireCompatible(v -> v.after("8.8.0")) { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible(v -> v.after("8.8.0")) { bwcVersion, baseName -> tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java index a354ca4b4b31c..df1c76ccf770f 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java @@ -456,7 +456,6 @@ public Collection createComponents(PluginServices services) { // Behavioral analytics components final AnalyticsTemplateRegistry analyticsTemplateRegistry = new AnalyticsTemplateRegistry( services.clusterService(), - services.featureService(), services.threadPool(), services.client(), services.xContentRegistry() @@ -466,7 +465,6 @@ public Collection createComponents(PluginServices services) { // Connector components final ConnectorTemplateRegistry connectorTemplateRegistry = new ConnectorTemplateRegistry( services.clusterService(), - services.featureService(), services.threadPool(), services.client(), services.xContentRegistry() diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java index 86882a28ec39f..ba121f2cf865e 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java @@ -7,15 +7,11 @@ package org.elasticsearch.xpack.application; -import org.elasticsearch.Version; import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; -import org.elasticsearch.xpack.application.analytics.AnalyticsTemplateRegistry; -import org.elasticsearch.xpack.application.connector.ConnectorTemplateRegistry; import org.elasticsearch.xpack.application.rules.action.ListQueryRulesetsAction; import org.elasticsearch.xpack.application.rules.retriever.QueryRuleRetrieverBuilder; -import java.util.Map; import java.util.Set; import static org.elasticsearch.xpack.application.rules.action.TestQueryRulesetAction.QUERY_RULES_TEST_API; @@ -30,14 +26,4 @@ public Set getFeatures() { ListQueryRulesetsAction.QUERY_RULE_LIST_TYPES ); } - - @Override - public Map getHistoricalFeatures() { - return Map.of( - ConnectorTemplateRegistry.CONNECTOR_TEMPLATES_FEATURE, - Version.V_8_10_0, - AnalyticsTemplateRegistry.ANALYTICS_TEMPLATE_FEATURE, - Version.V_8_12_0 - ); - } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java index 
d9f433b8052bf..99a239dd617a2 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java @@ -7,13 +7,10 @@ package org.elasticsearch.xpack.application.analytics; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -36,8 +33,6 @@ public class AnalyticsTemplateRegistry extends IndexTemplateRegistry { - public static final NodeFeature ANALYTICS_TEMPLATE_FEATURE = new NodeFeature("behavioral_analytics.templates"); - // This number must be incremented when we make changes to built-in templates. static final int REGISTRY_VERSION = 3; @@ -100,17 +95,13 @@ protected List getIngestPipelines() { ) ); - private final FeatureService featureService; - public AnalyticsTemplateRegistry( ClusterService clusterService, - FeatureService featureService, ThreadPool threadPool, Client client, NamedXContentRegistry xContentRegistry ) { super(Settings.EMPTY, clusterService, threadPool, client, xContentRegistry); - this.featureService = featureService; } @Override @@ -138,9 +129,4 @@ protected boolean requiresMasterNode() { // there and the ActionNotFoundTransportException errors are then prevented. 
return true; } - - @Override - protected boolean isClusterReady(ClusterChangedEvent event) { - return featureService.clusterHasFeature(event.state(), ANALYTICS_TEMPLATE_FEATURE); - } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/RestPostAnalyticsEventAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/RestPostAnalyticsEventAction.java index 34292c4669333..5706e5e384053 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/RestPostAnalyticsEventAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/RestPostAnalyticsEventAction.java @@ -7,8 +7,9 @@ package org.elasticsearch.xpack.application.analytics.action; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.core.Tuple; import org.elasticsearch.license.XPackLicenseState; @@ -48,11 +49,26 @@ public List routes() { @Override protected RestChannelConsumer innerPrepareRequest(RestRequest restRequest, NodeClient client) { - PostAnalyticsEventAction.Request request = buidRequest(restRequest); + Tuple sourceTuple = restRequest.contentOrSourceParam(); + + var content = sourceTuple.v2(); + PostAnalyticsEventAction.RequestBuilder builder = PostAnalyticsEventAction.Request.builder( + restRequest.param("collection_name"), + restRequest.param("event_type"), + sourceTuple.v1(), + content + ); + + builder.debug(restRequest.paramAsBoolean("debug", false)); + + final Map> headers = restRequest.getHeaders(); + builder.headers(headers); + builder.clientAddress(getClientAddress(restRequest, headers)); + return channel -> client.execute( PostAnalyticsEventAction.INSTANCE, - request, - new RestToXContentListener<>(channel, r -> RestStatus.ACCEPTED) + builder.request(), + ActionListener.withRef(new RestToXContentListener<>(channel, r -> RestStatus.ACCEPTED), content) ); } @@ -71,22 +87,4 @@ private static InetAddress getClientAddress(RestRequest restRequest, Map sourceTuple = restRequest.contentOrSourceParam(); - - PostAnalyticsEventAction.RequestBuilder builder = PostAnalyticsEventAction.Request.builder( - restRequest.param("collection_name"), - restRequest.param("event_type"), - sourceTuple.v1(), - sourceTuple.v2() - ); - - builder.debug(restRequest.paramAsBoolean("debug", false)); - - final Map> headers = restRequest.getHeaders(); - builder.headers(headers); - builder.clientAddress(getClientAddress(restRequest, headers)); - - return builder.request(); - } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorCustomSchedule.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorCustomSchedule.java index 7badf6926c574..387224408a14b 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorCustomSchedule.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorCustomSchedule.java @@ -140,7 +140,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public void writeTo(StreamOutput out) throws IOException { out.writeWriteable(configurationOverrides); 
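A note on the RestPostAnalyticsEventAction hunk above: the request body now arrives as a ReleasableBytesReference, so the handler wraps its REST listener with ActionListener.withRef(..., content), which presumably keeps the pooled buffer retained until the listener has completed and the response is on the wire. The sketch below illustrates that pattern with hypothetical stand-in types (RefCounted, Listener, Listeners), not the real Elasticsearch interfaces:

// Hypothetical stand-ins (not the actual Elasticsearch interfaces) showing
// the withRef pattern: release a ref-counted buffer exactly once, after the
// wrapped listener completes, whether it completes normally or exceptionally.
interface RefCounted {
    boolean decRef(); // drop one reference; the buffer is recycled at zero
}

interface Listener<T> {
    void onResponse(T response);

    void onFailure(Exception e);
}

final class Listeners {
    static <T> Listener<T> withRef(Listener<T> delegate, RefCounted ref) {
        return new Listener<T>() {
            @Override
            public void onResponse(T response) {
                try {
                    delegate.onResponse(response);
                } finally {
                    ref.decRef(); // request body may be recycled now
                }
            }

            @Override
            public void onFailure(Exception e) {
                try {
                    delegate.onFailure(e);
                } finally {
                    ref.decRef(); // released on the failure path too
                }
            }
        };
    }
}

Without such a wrapper, a pooled request body could be recycled while the asynchronous action is still reading from it.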
out.writeBoolean(enabled); - out.writeString(interval.toString()); + out.writeString(interval.expression()); out.writeOptionalInstant(lastSynced); out.writeString(name); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorScheduling.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorScheduling.java index 3c08a5ac1e218..008cbca0cd5ea 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorScheduling.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorScheduling.java @@ -222,7 +222,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(enabled); - out.writeString(interval.toString()); + out.writeString(interval.expression()); } @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java index 41976bc6b4272..9b8cc7cfdbe4f 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java @@ -8,13 +8,10 @@ package org.elasticsearch.xpack.application.connector; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -33,8 +30,6 @@ public class ConnectorTemplateRegistry extends IndexTemplateRegistry { - public static final NodeFeature CONNECTOR_TEMPLATES_FEATURE = new NodeFeature("elastic-connectors.templates"); - // This number must be incremented when we make changes to built-in templates. 
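The ConnectorCustomSchedule and ConnectorScheduling writeTo fixes above replace interval.toString() with interval.expression() when writing the cron interval to the stream. The reading side presumably reconstructs the field by parsing the transmitted string, so the writer must emit a string that parses back to the same schedule; expression() returns the original cron source, whereas toString() carries no such guarantee. A minimal sketch of the failure mode, using a hypothetical Cron stand-in rather than the real class:

// Hypothetical Cron stand-in: expression() returns the exact source string,
// while toString() renders a debug form that does not re-parse cleanly.
final class Cron {
    private final String expression;

    Cron(String expression) { this.expression = expression; }

    String expression() { return expression; }

    @Override
    public String toString() { return "Cron[" + expression + "]"; }
}

final class CronRoundTrip {
    public static void main(String[] args) {
        Cron interval = new Cron("0 0 * * * ?");
        // Serializing toString() and re-parsing corrupts the schedule:
        Cron viaToString = new Cron(interval.toString());
        // Serializing expression() round-trips exactly:
        Cron viaExpression = new Cron(interval.expression());
        System.out.println(viaToString.expression());   // Cron[0 0 * * * ?]
        System.out.println(viaExpression.expression()); // 0 0 * * * ?
    }
}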
static final int REGISTRY_VERSION = 3; @@ -153,17 +148,13 @@ protected List getIngestPipelines() { ) ); - private final FeatureService featureService; - public ConnectorTemplateRegistry( ClusterService clusterService, - FeatureService featureService, ThreadPool threadPool, Client client, NamedXContentRegistry xContentRegistry ) { super(Settings.EMPTY, clusterService, threadPool, client, xContentRegistry); - this.featureService = featureService; } @Override @@ -186,9 +177,4 @@ protected boolean requiresMasterNode() { // Necessary to prevent conflicts in some mixed-cluster environments with pre-7.7 nodes return true; } - - @Override - protected boolean isClusterReady(ClusterChangedEvent event) { - return featureService.clusterHasFeature(event.state(), CONNECTOR_TEMPLATES_FEATURE); - } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java index a5e2d3f79da0e..3a61c848d3813 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java @@ -67,7 +67,10 @@ public QueryRulesetListItem(StreamInput in) throws IOException { } else { this.criteriaTypeToCountMap = Map.of(); } - if (in.getTransportVersion().onOrAfter(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES)) { + TransportVersion streamTransportVersion = in.getTransportVersion(); + if (streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_15) + || streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16) + || streamTransportVersion.onOrAfter(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES)) { this.ruleTypeToCountMap = in.readMap(m -> in.readEnum(QueryRule.QueryRuleType.class), StreamInput::readInt); } else { this.ruleTypeToCountMap = Map.of(); @@ -100,7 +103,10 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(EXPANDED_RULESET_COUNT_TRANSPORT_VERSION)) { out.writeMap(criteriaTypeToCountMap, StreamOutput::writeEnum, StreamOutput::writeInt); } - if (out.getTransportVersion().onOrAfter(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES)) { + TransportVersion streamTransportVersion = out.getTransportVersion(); + if (streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_15) + || streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16) + || streamTransportVersion.onOrAfter(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES)) { out.writeMap(ruleTypeToCountMap, StreamOutput::writeEnum, StreamOutput::writeInt); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/RestPutQueryRuleAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/RestPutQueryRuleAction.java index 4addd97465bf2..1660502d77920 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/RestPutQueryRuleAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/RestPutQueryRuleAction.java @@ -43,7 +43,7 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest restRequest, NodeC PutQueryRuleAction.Request request = new 
PutQueryRuleAction.Request( restRequest.param("ruleset_id"), restRequest.param("rule_id"), - restRequest.content(), + restRequest.requiredContent(), restRequest.getXContentType() ); return channel -> client.execute( diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/RestPutQueryRulesetAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/RestPutQueryRulesetAction.java index a43ac70327e77..db20e66845f35 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/RestPutQueryRulesetAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/RestPutQueryRulesetAction.java @@ -42,7 +42,7 @@ public List routes() { protected RestChannelConsumer innerPrepareRequest(RestRequest restRequest, NodeClient client) throws IOException { PutQueryRulesetAction.Request request = new PutQueryRulesetAction.Request( restRequest.param("ruleset_id"), - restRequest.content(), + restRequest.requiredContent(), restRequest.getXContentType() ); return channel -> client.execute( diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/EnterpriseSearchBaseRestHandlerTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/EnterpriseSearchBaseRestHandlerTests.java index 681b14e8be61c..1099603e9be07 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/EnterpriseSearchBaseRestHandlerTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/EnterpriseSearchBaseRestHandlerTests.java @@ -61,7 +61,7 @@ public List routes() { }; FakeRestRequest fakeRestRequest = new FakeRestRequest(); - FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, true, isLicensed ? 0 : 1); + FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), isLicensed ? 
0 : 1); try (var threadPool = createThreadPool()) { final var client = new NoOpNodeClient(threadPool); diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java index 50102b8cfcf53..fb2fb11c7460f 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.application.analytics; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -31,7 +30,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.ingest.PipelineConfiguration; import org.elasticsearch.test.ClusterServiceUtils; @@ -42,7 +40,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.application.EnterpriseSearchFeatures; import org.elasticsearch.xpack.core.ilm.IndexLifecycleMetadata; import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; import org.elasticsearch.xpack.core.ilm.LifecyclePolicyMetadata; @@ -78,13 +75,7 @@ public void createRegistryAndClient() { threadPool = new TestThreadPool(this.getClass().getName()); client = new VerifyingClient(threadPool); ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); - registry = new AnalyticsTemplateRegistry( - clusterService, - new FeatureService(List.of(new EnterpriseSearchFeatures())), - threadPool, - client, - NamedXContentRegistry.EMPTY - ); + registry = new AnalyticsTemplateRegistry(clusterService, threadPool, client, NamedXContentRegistry.EMPTY); } @After @@ -282,25 +273,6 @@ public void testThatNonExistingPipelinesAreAddedImmediately() throws Exception { assertBusy(() -> assertThat(calledTimes.get(), equalTo(registry.getIngestPipelines().size()))); } - public void testThatNothingIsInstalledWhenAllNodesAreNotUpdated() { - DiscoveryNode updatedNode = DiscoveryNodeUtils.create("updatedNode"); - DiscoveryNode outdatedNode = DiscoveryNodeUtils.create("outdatedNode", ESTestCase.buildNewFakeTransportAddress(), Version.V_8_7_0); - DiscoveryNodes nodes = DiscoveryNodes.builder() - .localNodeId("updatedNode") - .masterNodeId("updatedNode") - .add(updatedNode) - .add(outdatedNode) - .build(); - - client.setVerifier((a, r, l) -> { - fail("if some cluster mode are not updated to at least v.8.8.0 nothing should happen"); - return null; - }); - - ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyMap(), Collections.emptyMap(), nodes); - registry.clusterChanged(event); - } - // ------------- /** diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java index 3fbc5cd749cb2..a4c7015afafcb 100644 --- 
a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.application.connector; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -31,7 +30,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.ingest.PipelineConfiguration; import org.elasticsearch.test.ClusterServiceUtils; @@ -41,7 +39,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.application.EnterpriseSearchFeatures; import org.elasticsearch.xpack.core.ilm.IndexLifecycleMetadata; import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; import org.elasticsearch.xpack.core.ilm.LifecyclePolicyMetadata; @@ -81,8 +78,7 @@ public void createRegistryAndClient() { threadPool = new TestThreadPool(this.getClass().getName()); client = new VerifyingClient(threadPool); ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); - FeatureService featureService = new FeatureService(List.of(new EnterpriseSearchFeatures())); - registry = new ConnectorTemplateRegistry(clusterService, featureService, threadPool, client, NamedXContentRegistry.EMPTY); + registry = new ConnectorTemplateRegistry(clusterService, threadPool, client, NamedXContentRegistry.EMPTY); } @After @@ -310,25 +306,6 @@ public void testThatNonExistingPipelinesAreAddedImmediately() throws Exception { assertBusy(() -> assertThat(calledTimes.get(), equalTo(registry.getIngestPipelines().size()))); } - public void testThatNothingIsInstalledWhenAllNodesAreNotUpdated() { - DiscoveryNode updatedNode = DiscoveryNodeUtils.create("updatedNode"); - DiscoveryNode outdatedNode = DiscoveryNodeUtils.create("outdatedNode", ESTestCase.buildNewFakeTransportAddress(), Version.V_8_9_0); - DiscoveryNodes nodes = DiscoveryNodes.builder() - .localNodeId("updatedNode") - .masterNodeId("updatedNode") - .add(updatedNode) - .add(outdatedNode) - .build(); - - client.setVerifier((a, r, l) -> { - fail("if some cluster mode are not updated to at least v.8.10.0 nothing should happen"); - return null; - }); - - ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyMap(), Collections.emptyMap(), nodes); - registry.clusterChanged(event); - } - // ------------- /** diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java index 27ac214558f89..27d5e240534b2 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java @@ -59,7 +59,9 @@ protected ListQueryRulesetsAction.Response 
mutateInstanceForVersion( ListQueryRulesetsAction.Response instance, TransportVersion version ) { - if (version.onOrAfter(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES)) { + if (version.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_15) + || version.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16) + || version.onOrAfter(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES)) { return instance; } else if (version.onOrAfter(QueryRulesetListItem.EXPANDED_RULESET_COUNT_TRANSPORT_VERSION)) { List updatedResults = new ArrayList<>(); diff --git a/x-pack/plugin/eql/build.gradle b/x-pack/plugin/eql/build.gradle index cda236c3d02ae..9ae67f0e27c2b 100644 --- a/x-pack/plugin/eql/build.gradle +++ b/x-pack/plugin/eql/build.gradle @@ -1,4 +1,10 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + apply plugin: 'elasticsearch.internal-es-plugin' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { @@ -32,7 +38,7 @@ dependencies { * Enable QA/rest integration tests for snapshot builds only * * TODO: Enable for all builds upon this feature release * ****************************************************************/ -if (BuildParams.isSnapshotBuild()) { +if (buildParams.isSnapshotBuild()) { addQaCheckDependencies(project) } diff --git a/x-pack/plugin/eql/qa/ccs-rolling-upgrade/build.gradle b/x-pack/plugin/eql/qa/ccs-rolling-upgrade/build.gradle index a16c24c852377..bc1a44f94d18a 100644 --- a/x-pack/plugin/eql/qa/ccs-rolling-upgrade/build.gradle +++ b/x-pack/plugin/eql/qa/ccs-rolling-upgrade/build.gradle @@ -5,9 +5,7 @@ * 2.0. */ - import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-testclusters' @@ -15,7 +13,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.bwc-test' apply plugin: 'elasticsearch.rest-resources' -BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> /** * We execute tests 3 times. diff --git a/x-pack/plugin/eql/qa/correctness/build.gradle b/x-pack/plugin/eql/qa/correctness/build.gradle index d245dc444f0b8..7ca6e8f134d20 100644 --- a/x-pack/plugin/eql/qa/correctness/build.gradle +++ b/x-pack/plugin/eql/qa/correctness/build.gradle @@ -1,9 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + apply plugin: 'elasticsearch.java' apply plugin: 'elasticsearch.legacy-java-rest-test' apply plugin: 'elasticsearch.internal-testclusters' import org.elasticsearch.gradle.testclusters.RunTask -import org.elasticsearch.gradle.internal.info.BuildParams dependencies { javaRestTestImplementation project(':test:framework') @@ -41,7 +47,7 @@ def runTaskCluster = testClusters.register('runTask') { tasks.named('javaRestTest').configure { onlyIf("FIPS mode disabled and service accoutn file available") { - serviceAccountFile && BuildParams.inFipsJvm == false + serviceAccountFile && buildParams.inFipsJvm == false } testLogging { diff --git a/x-pack/plugin/eql/qa/mixed-node/build.gradle b/x-pack/plugin/eql/qa/mixed-node/build.gradle index 8b9e082215fc4..bbeb439ab6155 100644 --- a/x-pack/plugin/eql/qa/mixed-node/build.gradle +++ b/x-pack/plugin/eql/qa/mixed-node/build.gradle @@ -1,8 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + apply plugin: 'elasticsearch.legacy-java-rest-test' apply plugin: 'elasticsearch.bwc-test' import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask dependencies { @@ -13,7 +19,7 @@ dependencies { tasks.named("javaRestTest").configure { enabled = false } -BuildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.10.0") && +buildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.10.0") && v != VersionProperties.getElasticsearchVersion()) { bwcVersion, baseName -> def cluster = testClusters.register(baseName) { versions = [bwcVersion.toString(), project.version] diff --git a/x-pack/plugin/eql/qa/rest/build.gradle b/x-pack/plugin/eql/qa/rest/build.gradle index d5b0cc42091f3..0ffecefb934f7 100644 --- a/x-pack/plugin/eql/qa/rest/build.gradle +++ b/x-pack/plugin/eql/qa/rest/build.gradle @@ -1,10 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-test-artifact' -import org.elasticsearch.gradle.internal.info.BuildParams - restResources { restApi { include '_common', 'bulk', 'indices', 'eql' @@ -30,7 +35,7 @@ tasks.named('yamlRestCompatTest') { usesDefaultDistribution() } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("javaRestTest").configure{enabled = false } tasks.named("yamlRestTest").configure{enabled = false } diff --git a/x-pack/plugin/eql/qa/security/build.gradle b/x-pack/plugin/eql/qa/security/build.gradle index 0641c47273f0e..9072a9a7bad3e 100644 --- a/x-pack/plugin/eql/qa/security/build.gradle +++ b/x-pack/plugin/eql/qa/security/build.gradle @@ -1,6 +1,11 @@ -apply plugin: 'elasticsearch.internal-java-rest-test' +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ -import org.elasticsearch.gradle.internal.info.BuildParams +apply plugin: 'elasticsearch.internal-java-rest-test' dependencies { javaRestTestImplementation project(path: xpackModule('eql:qa:common')) @@ -10,7 +15,7 @@ tasks.named('javaRestTest') { usesDefaultDistribution() } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("javaRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Attribute.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Attribute.java index 45f42a754910d..53debedafc3d8 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Attribute.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Attribute.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.esql.core.expression; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.core.Nullable; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -34,11 +33,6 @@ public abstract class Attribute extends NamedExpression { */ protected static final String SYNTHETIC_ATTRIBUTE_NAME_PREFIX = "$$"; - public static List getNamedWriteables() { - // TODO add UnsupportedAttribute when these are moved to the same project - return List.of(FieldAttribute.ENTRY, MetadataAttribute.ENTRY, ReferenceAttribute.ENTRY); - } - // can the attr be null - typically used in JOINs private final Nullability nullability; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/AttributeSet.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/AttributeSet.java index e3eac60703915..a092e17931237 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/AttributeSet.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/AttributeSet.java @@ -113,7 +113,7 @@ public T[] toArray(T[] a) { @Override public boolean add(Attribute e) { - return delegate.put(e, PRESENT) != null; + return delegate.put(e, PRESENT) == null; } @Override diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Expression.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Expression.java index 0be7f65d767c7..00765a8c0528c 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Expression.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/Expression.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.esql.core.expression; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.capabilities.Resolvable; import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; @@ -15,7 +14,6 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.StringUtils; -import java.util.ArrayList; import java.util.List; import java.util.function.Supplier; @@ -29,14 +27,6 @@ * (which is a type of 
expression) with a single child, c. */ public abstract class Expression extends Node<Expression> implements Resolvable { - public static List<NamedWriteableRegistry.Entry> getNamedWriteables() { - List<NamedWriteableRegistry.Entry> entries = new ArrayList<>(); - for (NamedWriteableRegistry.Entry e : NamedExpression.getNamedWriteables()) { - entries.add(new NamedWriteableRegistry.Entry(Expression.class, e.name, in -> (NamedExpression) e.reader.read(in))); - } - entries.add(Literal.ENTRY); - return entries; - } public static class TypeResolution { private final boolean failed; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/ExpressionCoreWritables.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/ExpressionCoreWritables.java new file mode 100644 index 0000000000000..174a0321a3057 --- /dev/null +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/ExpressionCoreWritables.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.core.expression; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; + +import java.util.ArrayList; +import java.util.List; + +public class ExpressionCoreWritables { + + public static List<NamedWriteableRegistry.Entry> getNamedWriteables() { + List<NamedWriteableRegistry.Entry> entries = new ArrayList<>(); + entries.addAll(expressions()); + entries.addAll(namedExpressions()); + entries.addAll(attributes()); + return entries; + } + + public static List<NamedWriteableRegistry.Entry> expressions() { + List<NamedWriteableRegistry.Entry> entries = new ArrayList<>(); + // add entries as expressions + for (NamedWriteableRegistry.Entry e : namedExpressions()) { + entries.add(new NamedWriteableRegistry.Entry(Expression.class, e.name, in -> (Expression) e.reader.read(in))); + } + entries.add(Literal.ENTRY); + return entries; + } + + public static List<NamedWriteableRegistry.Entry> namedExpressions() { + List<NamedWriteableRegistry.Entry> entries = new ArrayList<>(); + // add entries as named writeables + for (NamedWriteableRegistry.Entry e : attributes()) { + entries.add(new NamedWriteableRegistry.Entry(NamedExpression.class, e.name, in -> (NamedExpression) e.reader.read(in))); + } + entries.add(Alias.ENTRY); + return entries; + } + + public static List<NamedWriteableRegistry.Entry> attributes() { + return List.of(FieldAttribute.ENTRY, MetadataAttribute.ENTRY, ReferenceAttribute.ENTRY); + } +} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/MetadataAttribute.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/MetadataAttribute.java index 6e4e9292bfc99..0f1cfbb85039c 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/MetadataAttribute.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/MetadataAttribute.java @@ -31,6 +31,7 @@ public class MetadataAttribute extends TypedAttribute { public static final String TIMESTAMP_FIELD = "@timestamp"; public static final String TSID_FIELD = "_tsid"; + public static final String SCORE = "_score"; static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( Attribute.class, @@ -50,7 +51,9 @@ public class MetadataAttribute extends TypedAttribute { SourceFieldMapper.NAME, tuple(DataType.SOURCE, false), IndexModeFieldMapper.NAME, - tuple(DataType.KEYWORD, true) + tuple(DataType.KEYWORD, true), + SCORE, + tuple(DataType.DOUBLE, false) );
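The one-character AttributeSet.add fix above deserves a note: the set delegates to a map, and Map.put returns the previous value for the key. The old '!= null' test therefore reported true for duplicates and false for genuinely new elements, inverting the Set.add contract, which requires true only when the set changed. A self-contained sketch of the corrected semantics:

    import java.util.LinkedHashMap;
    import java.util.Map;

    class PutAsAddDemo {
        static final Object PRESENT = new Object();

        public static void main(String[] args) {
            Map<String, Object> delegate = new LinkedHashMap<>();
            // put returns null when the key was absent, i.e. the set changed
            System.out.println(delegate.put("a", PRESENT) == null); // true: newly added
            System.out.println(delegate.put("a", PRESENT) == null); // false: already present
        }
    }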
private final boolean searchable; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/NamedExpression.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/NamedExpression.java index 3b018f09e5ebd..f425fc7110a41 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/NamedExpression.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/NamedExpression.java @@ -7,11 +7,9 @@ package org.elasticsearch.xpack.esql.core.expression; import org.elasticsearch.common.io.stream.NamedWriteable; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.core.Nullable; import org.elasticsearch.xpack.esql.core.tree.Source; -import java.util.ArrayList; import java.util.List; import java.util.Objects; @@ -20,14 +18,6 @@ * (by converting to an attribute). */ public abstract class NamedExpression extends Expression implements NamedWriteable { - public static List getNamedWriteables() { - List entries = new ArrayList<>(); - for (NamedWriteableRegistry.Entry e : Attribute.getNamedWriteables()) { - entries.add(new NamedWriteableRegistry.Entry(NamedExpression.class, e.name, in -> (NamedExpression) e.reader.read(in))); - } - entries.add(Alias.ENTRY); - return entries; - } private final String name; private final NameId id; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/Function.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/Function.java index cad5c631088f2..a1afcdbf1f77c 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/Function.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/function/Function.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.esql.core.expression.function; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.Nullability; @@ -42,6 +43,11 @@ public Nullability nullable() { return Expressions.nullable(children()); } + /** Return true if this function can be executed under the provided {@link XPackLicenseState}, otherwise false.*/ + public boolean checkLicense(XPackLicenseState state) { + return true; + } + @Override public int hashCode() { return Objects.hash(getClass(), children()); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/Predicates.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/Predicates.java index 28bbf956fd71e..e63cc1fcf25fe 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/Predicates.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/Predicates.java @@ -6,7 +6,9 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate; +import org.elasticsearch.core.Tuple; import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; @@ -113,4 +115,43 @@ public static List 
subtract(List<Expression> from, List<Expression> } return diff.isEmpty() ? emptyList() : diff; } + + /** + * Given a list of predicate expressions, extract a new expression of + * all the common ones and return it, along with the original list from + * which the common ones have been removed. + * <p>
+ * Example: for ['field1 > 0 AND field2 > 0', 'field1 > 0 AND field3 > 0', + * 'field1 > 0'], the function will return 'field1 > 0' as the common + * predicate expression and ['field2 > 0', 'field3 > 0', Literal.TRUE] as + * the list of leftover predicates. + * + * @param expressions list of expressions to extract common predicates from. + * @return a tuple having as the first element an expression of the common + * predicates and as the second element the list of expressions with the + * common predicates removed. If there are no common predicates, `null` will + * be returned as the first element and the original list as the second. If + * for one of the expressions in the input list, nothing is left after + * trimming the common predicates, it will be replaced with Literal.TRUE. + */ + public static Tuple<Expression, List<Expression>> extractCommon(List<Expression> expressions) { + List<Expression> common = null; + List<List<Expression>> splitAnds = new ArrayList<>(expressions.size()); + for (var expression : expressions) { + var split = splitAnd(expression); + common = common == null ? split : inCommon(split, common); + if (common.isEmpty()) { + return Tuple.tuple(null, expressions); + } + splitAnds.add(split); + } + + List<Expression> trimmed = new ArrayList<>(expressions.size()); + final List<Expression> finalCommon = common; + splitAnds.forEach(split -> { + var subtracted = subtract(split, finalCommon); + trimmed.add(subtracted.isEmpty() ? Literal.TRUE : combineAnd(subtracted)); + }); + return Tuple.tuple(combineAnd(common), trimmed); + } } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/fulltext/FullTextPredicate.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/fulltext/FullTextPredicate.java index 29a567e83211d..b23593804f8fe 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/fulltext/FullTextPredicate.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/fulltext/FullTextPredicate.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.esql.core.expression.predicate.fulltext; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -23,10 +22,6 @@ public abstract class FullTextPredicate extends Expression { - public static List<NamedWriteableRegistry.Entry> getNamedWriteables() { - return List.of(MatchQueryPredicate.ENTRY, MultiMatchQueryPredicate.ENTRY, StringQueryPredicate.ENTRY); - } - public enum Operator { AND, OR; diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/fulltext/StringQueryPredicate.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/fulltext/StringQueryPredicate.java deleted file mode 100644 index 95000a5364e12..0000000000000 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/predicate/fulltext/StringQueryPredicate.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0.
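A usage sketch for the extractCommon helper added above; 'left' and 'right' are hypothetical names for parsed predicate trees such as 'a > 0 AND b > 0' and 'a > 0 AND c > 0':

    Tuple<Expression, List<Expression>> result = Predicates.extractCommon(List.of(left, right));
    Expression common = result.v1();     // 'a > 0', or null when nothing is shared
    List<Expression> rest = result.v2(); // ['b > 0', 'c > 0']; Literal.TRUE marks a
                                         // branch fully consumed by the common part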
- */ -package org.elasticsearch.xpack.esql.core.expression.predicate.fulltext; - -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.tree.NodeInfo; -import org.elasticsearch.xpack.esql.core.tree.Source; - -import java.io.IOException; -import java.util.List; -import java.util.Map; - -import static java.util.Collections.emptyList; - -public final class StringQueryPredicate extends FullTextPredicate { - - public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - Expression.class, - "StringQueryPredicate", - StringQueryPredicate::new - ); - - private final Map fields; - - public StringQueryPredicate(Source source, String query, String options) { - super(source, query, options, emptyList()); - - // inferred - this.fields = FullTextUtils.parseFields(optionMap(), source); - } - - StringQueryPredicate(StreamInput in) throws IOException { - super(in); - assert super.children().isEmpty(); - this.fields = FullTextUtils.parseFields(optionMap(), source()); - } - - @Override - protected NodeInfo info() { - return NodeInfo.create(this, StringQueryPredicate::new, query(), options()); - } - - @Override - public Expression replaceChildren(List newChildren) { - throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); - } - - public Map fields() { - return fields; - } - - @Override - public String getWriteableName() { - return ENTRY.name; - } -} diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/planner/ExpressionTranslators.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/planner/ExpressionTranslators.java index b6383fac33299..7836522c77130 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/planner/ExpressionTranslators.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/planner/ExpressionTranslators.java @@ -12,7 +12,6 @@ import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.MultiMatchQueryPredicate; -import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.StringQueryPredicate; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; @@ -26,7 +25,6 @@ import org.elasticsearch.xpack.esql.core.querydsl.query.MultiMatchQuery; import org.elasticsearch.xpack.esql.core.querydsl.query.NotQuery; import org.elasticsearch.xpack.esql.core.querydsl.query.Query; -import org.elasticsearch.xpack.esql.core.querydsl.query.QueryStringQuery; import org.elasticsearch.xpack.esql.core.querydsl.query.RegexQuery; import org.elasticsearch.xpack.esql.core.querydsl.query.WildcardQuery; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -73,18 +71,6 @@ private static Query translateField(RegexMatch e, String targetFieldName) { } } - public static class StringQueries extends ExpressionTranslator { - - @Override - protected Query asQuery(StringQueryPredicate q, TranslatorHandler handler) { - return doTranslate(q, handler); - } - - public static Query doTranslate(StringQueryPredicate q, TranslatorHandler handler) { - return 
new QueryStringQuery(q.source(), q.query(), q.fields(), q); } } - public static class MultiMatches extends ExpressionTranslator<MultiMatchQueryPredicate> { @Override diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plugin/EsqlCorePlugin.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plugin/EsqlCorePlugin.java index d84a471815a9a..61b480968e974 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plugin/EsqlCorePlugin.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plugin/EsqlCorePlugin.java @@ -12,7 +12,6 @@ import org.elasticsearch.plugins.Plugin; public class EsqlCorePlugin extends Plugin implements ExtensiblePlugin { - public static final FeatureFlag DATE_NANOS_FEATURE_FLAG = new FeatureFlag("esql_date_nanos"); public static final FeatureFlag SEMANTIC_TEXT_FEATURE_FLAG = new FeatureFlag("esql_semantic_text"); } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/querydsl/query/QueryStringQuery.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/querydsl/query/QueryStringQuery.java index 8ac90e6314174..8dcb87749ae48 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/querydsl/query/QueryStringQuery.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/querydsl/query/QueryStringQuery.java @@ -14,7 +14,6 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryStringQueryBuilder; -import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.StringQueryPredicate; import org.elasticsearch.xpack.esql.core.tree.Source; import java.util.Collections; @@ -55,20 +54,13 @@ public class QueryStringQuery extends Query { private final String query; private final Map<String, Float> fields; - private StringQueryPredicate predicate; private final Map<String, String> options; - // dedicated constructor for QueryTranslator - public QueryStringQuery(Source source, String query, String fieldName) { - this(source, query, Collections.singletonMap(fieldName, Float.valueOf(1.0f)), null); - } - - public QueryStringQuery(Source source, String query, Map<String, Float> fields, StringQueryPredicate predicate) { + public QueryStringQuery(Source source, String query, Map<String, Float> fields, Map<String, String> options) { super(source); this.query = query; this.fields = fields; - this.predicate = predicate; - this.options = predicate == null ? Collections.emptyMap() : predicate.optionMap(); + this.options = options == null ?
Collections.emptyMap() : options; } @Override @@ -95,7 +87,7 @@ public String query() { @Override public int hashCode() { - return Objects.hash(query, fields, predicate); + return Objects.hash(query, fields); } @Override @@ -109,7 +101,7 @@ public boolean equals(Object obj) { } QueryStringQuery other = (QueryStringQuery) obj; - return Objects.equals(query, other.query) && Objects.equals(fields, other.fields) && Objects.equals(predicate, other.predicate); + return Objects.equals(query, other.query) && Objects.equals(fields, other.fields) && Objects.equals(options, other.options); } @Override diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java index 9708a3ea0db85..1c65dd386667f 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java @@ -29,7 +29,6 @@ import java.util.function.Function; import static java.util.stream.Collectors.toMap; -import static java.util.stream.Collectors.toUnmodifiableMap; import static org.elasticsearch.xpack.esql.core.util.PlanStreamInput.readCachedStringWithVersionCheck; import static org.elasticsearch.xpack.esql.core.util.PlanStreamOutput.writeCachedStringWithVersionCheck; @@ -210,7 +209,6 @@ public enum DataType { * check that sending them to a function produces a sane error message. */ public static final Map UNDER_CONSTRUCTION = Map.ofEntries( - Map.entry(DATE_NANOS, EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG), Map.entry(SEMANTIC_TEXT, EsqlCorePlugin.SEMANTIC_TEXT_FEATURE_FLAG) ); @@ -276,7 +274,7 @@ public enum DataType { private static final Collection STRING_TYPES = DataType.types().stream().filter(DataType::isString).toList(); - private static final Map NAME_TO_TYPE = TYPES.stream().collect(toUnmodifiableMap(DataType::typeName, t -> t)); + private static final Map NAME_TO_TYPE; private static final Map ES_TO_TYPE; @@ -287,6 +285,10 @@ public enum DataType { map.put("point", DataType.CARTESIAN_POINT); map.put("shape", DataType.CARTESIAN_SHAPE); ES_TO_TYPE = Collections.unmodifiableMap(map); + // DATETIME has different esType and typeName, add an entry in NAME_TO_TYPE with date as key + map = TYPES.stream().collect(toMap(DataType::typeName, t -> t)); + map.put("date", DataType.DATETIME); + NAME_TO_TYPE = Collections.unmodifiableMap(map); } private static final Map NAME_OR_ALIAS_TO_TYPE; @@ -588,6 +590,13 @@ public DataType noText() { return isString(this) ? KEYWORD : this; } + public boolean isDate() { + return switch (this) { + case DATETIME, DATE_NANOS -> true; + default -> false; + }; + } + /** * Named parameters with default values. It's just easier to do this with * a builder in java.... 
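Two DataType refinements close the hunk above: the name lookup now also maps the legacy name "date" to DATETIME (whose esType and typeName differ), and the new isDate() predicate groups the two date-valued types. A quick sketch of the predicate in use:

    // isDate() is true for exactly the two date types:
    assert DataType.DATETIME.isDate();
    assert DataType.DATE_NANOS.isDate();
    assert DataType.KEYWORD.isDate() == false;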
diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/querydsl/query/QueryStringQueryTests.java b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/querydsl/query/QueryStringQueryTests.java index 0f80011961092..22e7b93e84ce1 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/querydsl/query/QueryStringQueryTests.java +++ b/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/querydsl/query/QueryStringQueryTests.java @@ -10,42 +10,40 @@ import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.StringQueryPredicate; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.util.StringUtils; import java.util.Collections; +import java.util.Map; import static org.hamcrest.Matchers.equalTo; public class QueryStringQueryTests extends ESTestCase { public void testQueryBuilding() { - QueryStringQueryBuilder qb = getBuilder("lenient=true"); + QueryStringQueryBuilder qb = getBuilder(Map.of("lenient", "true")); assertThat(qb.lenient(), equalTo(true)); - qb = getBuilder("lenient=true;default_operator=AND"); + qb = getBuilder(Map.of("lenient", "true", "default_operator", "AND")); assertThat(qb.lenient(), equalTo(true)); assertThat(qb.defaultOperator(), equalTo(Operator.AND)); - Exception e = expectThrows(IllegalArgumentException.class, () -> getBuilder("pizza=yummy")); + Exception e = expectThrows(IllegalArgumentException.class, () -> getBuilder(Map.of("pizza", "yummy"))); assertThat(e.getMessage(), equalTo("illegal query_string option [pizza]")); - e = expectThrows(ElasticsearchParseException.class, () -> getBuilder("type=aoeu")); + e = expectThrows(ElasticsearchParseException.class, () -> getBuilder(Map.of("type", "aoeu"))); assertThat(e.getMessage(), equalTo("failed to parse [multi_match] query type [aoeu]. unknown type.")); } - private static QueryStringQueryBuilder getBuilder(String options) { + private static QueryStringQueryBuilder getBuilder(Map options) { final Source source = new Source(1, 1, StringUtils.EMPTY); - final StringQueryPredicate mmqp = new StringQueryPredicate(source, "eggplant", options); - final QueryStringQuery mmq = new QueryStringQuery(source, "eggplant", Collections.singletonMap("foo", 1.0f), mmqp); - return (QueryStringQueryBuilder) mmq.asBuilder(); + final QueryStringQuery query = new QueryStringQuery(source, "eggplant", Collections.singletonMap("foo", 1.0f), options); + return (QueryStringQueryBuilder) query.asBuilder(); } public void testToString() { final Source source = new Source(1, 1, StringUtils.EMPTY); - final StringQueryPredicate mmqp = new StringQueryPredicate(source, "eggplant", ""); - final QueryStringQuery mmq = new QueryStringQuery(source, "eggplant", Collections.singletonMap("foo", 1.0f), mmqp); + final QueryStringQuery mmq = new QueryStringQuery(source, "eggplant", Collections.singletonMap("foo", 1.0f), Map.of()); assertEquals("QueryStringQuery@1:2[{foo=1.0}:eggplant]", mmq.toString()); } } diff --git a/x-pack/plugin/esql/build.gradle b/x-pack/plugin/esql/build.gradle index 1cf39f06f77c8..02f9752d21e09 100644 --- a/x-pack/plugin/esql/build.gradle +++ b/x-pack/plugin/esql/build.gradle @@ -1,8 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
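The test rewrite above tracks the QueryStringQuery constructor change: query_string options are now passed directly as a map instead of being parsed out of a StringQueryPredicate. A minimal construction sketch, assuming a Source named 'source' is in scope:

    QueryStringQuery query = new QueryStringQuery(
        source,
        "eggplant",
        Map.of("foo", 1.0f),                                  // per-field boosts
        Map.of("lenient", "true", "default_operator", "AND")  // query_string options
    );
    QueryStringQueryBuilder builder = (QueryStringQueryBuilder) query.asBuilder();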
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + plugins { id 'idea' } -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask; import org.elasticsearch.gradle.internal.util.SourceDirectoryCommandLineArgumentProvider; import static org.elasticsearch.gradle.util.PlatformUtils.normalize @@ -28,6 +34,7 @@ dependencies { compileOnly project(':modules:lang-painless:spi') compileOnly project(xpackModule('esql-core')) compileOnly project(xpackModule('ml')) + implementation project(xpackModule('kql')) implementation project('compute') implementation project('compute:ann') implementation project(':libs:dissect') @@ -44,6 +51,7 @@ dependencies { testImplementation(testArtifact(project(xpackModule('core')))) testImplementation project(path: xpackModule('enrich')) testImplementation project(path: xpackModule('spatial')) + testImplementation project(path: xpackModule('kql')) testImplementation project(path: ':modules:reindex') testImplementation project(path: ':modules:parent-join') @@ -74,7 +82,7 @@ interface Injected { } tasks.named("test").configure { - if (BuildParams.isCi() == false) { + if (buildParams.isCi() == false) { systemProperty 'generateDocs', true def injected = project.objects.newInstance(Injected) doFirst { @@ -83,7 +91,6 @@ tasks.named("test").configure { } } File functionsFolder = file("build/testrun/test/temp/esql/functions") - File signatureFolder = file("build/testrun/test/temp/esql/functions/signature") File typesFolder = file("build/testrun/test/temp/esql/functions/types") def functionsDocFolder = file("${rootDir}/docs/reference/esql/functions") def effectiveProjectDir = projectDir @@ -145,7 +152,7 @@ tasks.named("test").configure { * Enable QA/rest integration tests for snapshot builds only * * TODO: Enable for all builds upon this feature release * ****************************************************************/ -if (BuildParams.isSnapshotBuild()) { +if (buildParams.isSnapshotBuild()) { addQaCheckDependencies(project) } diff --git a/x-pack/plugin/esql/compute/build.gradle b/x-pack/plugin/esql/compute/build.gradle index 3deac4925c951..8e866cec3f421 100644 --- a/x-pack/plugin/esql/compute/build.gradle +++ b/x-pack/plugin/esql/compute/build.gradle @@ -11,11 +11,13 @@ base { dependencies { compileOnly project(':server') compileOnly project('ann') + compileOnly project(xpackModule('core')) compileOnly project(xpackModule('ml')) annotationProcessor project('gen') implementation 'com.carrotsearch:hppc:0.8.1' - testImplementation project(':test:framework') + testImplementation(project(':modules:analysis-common')) + testImplementation(project(':test:framework')) testImplementation(project(xpackModule('esql-core'))) testImplementation(project(xpackModule('core'))) testImplementation(project(xpackModule('ml'))) @@ -608,6 +610,27 @@ tasks.named('stringTemplates').configure { it.outputFile = "org/elasticsearch/compute/aggregation/RateDoubleAggregator.java" } + File stdDevAggregatorInputFile = file("src/main/java/org/elasticsearch/compute/aggregation/X-StdDevAggregator.java.st") + template { + it.properties = intProperties + it.inputFile = stdDevAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/StdDevIntAggregator.java" + } + template { + it.properties = longProperties + it.inputFile = stdDevAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/StdDevLongAggregator.java" + } + 
template { + it.properties = floatProperties + it.inputFile = stdDevAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/StdDevFloatAggregator.java" + } + template { + it.properties = doubleProperties + it.inputFile = stdDevAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/StdDevDoubleAggregator.java" + } File topAggregatorInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-TopAggregator.java.st") template { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/StdDevDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/StdDevDoubleAggregator.java new file mode 100644 index 0000000000000..3a1185d34fa23 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/StdDevDoubleAggregator.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * A standard deviation aggregation definition for double. + * This class is generated. Edit `X-StdDevAggregator.java.st` instead. + */ +@Aggregator( + { + @IntermediateState(name = "mean", type = "DOUBLE"), + @IntermediateState(name = "m2", type = "DOUBLE"), + @IntermediateState(name = "count", type = "LONG") } +) +@GroupingAggregator +public class StdDevDoubleAggregator { + + public static StdDevStates.SingleState initSingle() { + return new StdDevStates.SingleState(); + } + + public static void combine(StdDevStates.SingleState state, double value) { + state.add(value); + } + + public static void combineIntermediate(StdDevStates.SingleState state, double mean, double m2, long count) { + state.combine(mean, m2, count); + } + + public static Block evaluateFinal(StdDevStates.SingleState state, DriverContext driverContext) { + return state.evaluateFinal(driverContext); + } + + public static StdDevStates.GroupingState initGrouping(BigArrays bigArrays) { + return new StdDevStates.GroupingState(bigArrays); + } + + public static void combine(StdDevStates.GroupingState current, int groupId, double value) { + current.add(groupId, value); + } + + public static void combineStates(StdDevStates.GroupingState current, int groupId, StdDevStates.GroupingState state, int statePosition) { + current.combine(groupId, state.getOrNull(statePosition)); + } + + public static void combineIntermediate(StdDevStates.GroupingState state, int groupId, double mean, double m2, long count) { + state.combine(groupId, mean, m2, count); + } + + public static Block evaluateFinal(StdDevStates.GroupingState state, IntVector selected, DriverContext driverContext) { + return state.evaluateFinal(selected, driverContext); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/StdDevFloatAggregator.java 
b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/StdDevFloatAggregator.java new file mode 100644 index 0000000000000..51c22e7e29c1e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/StdDevFloatAggregator.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * A standard deviation aggregation definition for float. + * This class is generated. Edit `X-StdDevAggregator.java.st` instead. + */ +@Aggregator( + { + @IntermediateState(name = "mean", type = "DOUBLE"), + @IntermediateState(name = "m2", type = "DOUBLE"), + @IntermediateState(name = "count", type = "LONG") } +) +@GroupingAggregator +public class StdDevFloatAggregator { + + public static StdDevStates.SingleState initSingle() { + return new StdDevStates.SingleState(); + } + + public static void combine(StdDevStates.SingleState state, float value) { + state.add(value); + } + + public static void combineIntermediate(StdDevStates.SingleState state, double mean, double m2, long count) { + state.combine(mean, m2, count); + } + + public static Block evaluateFinal(StdDevStates.SingleState state, DriverContext driverContext) { + return state.evaluateFinal(driverContext); + } + + public static StdDevStates.GroupingState initGrouping(BigArrays bigArrays) { + return new StdDevStates.GroupingState(bigArrays); + } + + public static void combine(StdDevStates.GroupingState current, int groupId, float value) { + current.add(groupId, value); + } + + public static void combineStates(StdDevStates.GroupingState current, int groupId, StdDevStates.GroupingState state, int statePosition) { + current.combine(groupId, state.getOrNull(statePosition)); + } + + public static void combineIntermediate(StdDevStates.GroupingState state, int groupId, double mean, double m2, long count) { + state.combine(groupId, mean, m2, count); + } + + public static Block evaluateFinal(StdDevStates.GroupingState state, IntVector selected, DriverContext driverContext) { + return state.evaluateFinal(selected, driverContext); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/StdDevIntAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/StdDevIntAggregator.java new file mode 100644 index 0000000000000..24eae35cb3249 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/StdDevIntAggregator.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * A standard deviation aggregation definition for int. + * This class is generated. Edit `X-StdDevAggregator.java.st` instead. + */ +@Aggregator( + { + @IntermediateState(name = "mean", type = "DOUBLE"), + @IntermediateState(name = "m2", type = "DOUBLE"), + @IntermediateState(name = "count", type = "LONG") } +) +@GroupingAggregator +public class StdDevIntAggregator { + + public static StdDevStates.SingleState initSingle() { + return new StdDevStates.SingleState(); + } + + public static void combine(StdDevStates.SingleState state, int value) { + state.add(value); + } + + public static void combineIntermediate(StdDevStates.SingleState state, double mean, double m2, long count) { + state.combine(mean, m2, count); + } + + public static Block evaluateFinal(StdDevStates.SingleState state, DriverContext driverContext) { + return state.evaluateFinal(driverContext); + } + + public static StdDevStates.GroupingState initGrouping(BigArrays bigArrays) { + return new StdDevStates.GroupingState(bigArrays); + } + + public static void combine(StdDevStates.GroupingState current, int groupId, int value) { + current.add(groupId, value); + } + + public static void combineStates(StdDevStates.GroupingState current, int groupId, StdDevStates.GroupingState state, int statePosition) { + current.combine(groupId, state.getOrNull(statePosition)); + } + + public static void combineIntermediate(StdDevStates.GroupingState state, int groupId, double mean, double m2, long count) { + state.combine(groupId, mean, m2, count); + } + + public static Block evaluateFinal(StdDevStates.GroupingState state, IntVector selected, DriverContext driverContext) { + return state.evaluateFinal(selected, driverContext); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/StdDevLongAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/StdDevLongAggregator.java new file mode 100644 index 0000000000000..888ace30a0c8e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/StdDevLongAggregator.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * A standard deviation aggregation definition for long. + * This class is generated. Edit `X-StdDevAggregator.java.st` instead. 
+ */ +@Aggregator( + { + @IntermediateState(name = "mean", type = "DOUBLE"), + @IntermediateState(name = "m2", type = "DOUBLE"), + @IntermediateState(name = "count", type = "LONG") } +) +@GroupingAggregator +public class StdDevLongAggregator { + + public static StdDevStates.SingleState initSingle() { + return new StdDevStates.SingleState(); + } + + public static void combine(StdDevStates.SingleState state, long value) { + state.add(value); + } + + public static void combineIntermediate(StdDevStates.SingleState state, double mean, double m2, long count) { + state.combine(mean, m2, count); + } + + public static Block evaluateFinal(StdDevStates.SingleState state, DriverContext driverContext) { + return state.evaluateFinal(driverContext); + } + + public static StdDevStates.GroupingState initGrouping(BigArrays bigArrays) { + return new StdDevStates.GroupingState(bigArrays); + } + + public static void combine(StdDevStates.GroupingState current, int groupId, long value) { + current.add(groupId, value); + } + + public static void combineStates(StdDevStates.GroupingState current, int groupId, StdDevStates.GroupingState state, int statePosition) { + current.combine(groupId, state.getOrNull(statePosition)); + } + + public static void combineIntermediate(StdDevStates.GroupingState state, int groupId, double mean, double m2, long count) { + state.combine(groupId, mean, m2, count); + } + + public static Block evaluateFinal(StdDevStates.GroupingState state, IntVector selected, DriverContext driverContext) { + return state.evaluateFinal(selected, driverContext); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevDoubleAggregatorFunction.java new file mode 100644 index 0000000000000..dd6cc89401a99 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevDoubleAggregatorFunction.java @@ -0,0 +1,178 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link StdDevDoubleAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class StdDevDoubleAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("mean", ElementType.DOUBLE), + new IntermediateStateDesc("m2", ElementType.DOUBLE), + new IntermediateStateDesc("count", ElementType.LONG) ); + + private final DriverContext driverContext; + + private final StdDevStates.SingleState state; + + private final List channels; + + public StdDevDoubleAggregatorFunction(DriverContext driverContext, List channels, + StdDevStates.SingleState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static StdDevDoubleAggregatorFunction create(DriverContext driverContext, + List channels) { + return new StdDevDoubleAggregatorFunction(driverContext, channels, StdDevDoubleAggregator.initSingle()); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page, BooleanVector mask) { + if (mask.allFalse()) { + // Entire page masked away + return; + } + if (mask.allTrue()) { + // No masking + DoubleBlock block = page.getBlock(channels.get(0)); + DoubleVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + return; + } + // Some positions masked away, others kept + DoubleBlock block = page.getBlock(channels.get(0)); + DoubleVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector, mask); + } else { + addRawBlock(block, mask); + } + } + + private void addRawVector(DoubleVector vector) { + for (int i = 0; i < vector.getPositionCount(); i++) { + StdDevDoubleAggregator.combine(state, vector.getDouble(i)); + } + } + + private void addRawVector(DoubleVector vector, BooleanVector mask) { + for (int i = 0; i < vector.getPositionCount(); i++) { + if (mask.getBoolean(i) == false) { + continue; + } + StdDevDoubleAggregator.combine(state, vector.getDouble(i)); + } + } + + private void addRawBlock(DoubleBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + StdDevDoubleAggregator.combine(state, block.getDouble(i)); + } + } + } + + private void addRawBlock(DoubleBlock block, BooleanVector mask) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (mask.getBoolean(p) == false) { + continue; + } + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + StdDevDoubleAggregator.combine(state, block.getDouble(i)); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block meanUncast = page.getBlock(channels.get(0)); + if (meanUncast.areAllValuesNull()) { + return; + } + DoubleVector mean = ((DoubleBlock) meanUncast).asVector(); + assert mean.getPositionCount() == 1; + Block m2Uncast = page.getBlock(channels.get(1)); + if (m2Uncast.areAllValuesNull()) { + return; + } + DoubleVector m2 = ((DoubleBlock) m2Uncast).asVector(); + assert m2.getPositionCount() == 1; + Block countUncast = page.getBlock(channels.get(2)); + if 
(countUncast.areAllValuesNull()) { + return; + } + LongVector count = ((LongBlock) countUncast).asVector(); + assert count.getPositionCount() == 1; + StdDevDoubleAggregator.combineIntermediate(state, mean.getDouble(0), m2.getDouble(0), count.getLong(0)); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = StdDevDoubleAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevDoubleAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevDoubleAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..313eed4ae97ae --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevDoubleAggregatorFunctionSupplier.java @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link StdDevDoubleAggregator}. + * This class is generated. Do not edit it. + */ +public final class StdDevDoubleAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + public StdDevDoubleAggregatorFunctionSupplier(List channels) { + this.channels = channels; + } + + @Override + public StdDevDoubleAggregatorFunction aggregator(DriverContext driverContext) { + return StdDevDoubleAggregatorFunction.create(driverContext, channels); + } + + @Override + public StdDevDoubleGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return StdDevDoubleGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "std_dev of doubles"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevDoubleGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..da49c254e353a --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevDoubleGroupingAggregatorFunction.java @@ -0,0 +1,223 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
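All four generated StdDev aggregators exchange the intermediate triple (mean, m2, count), the classic Welford accumulator, which is what lets partial results from different drivers merge exactly. A sketch of the standard parallel combine (Chan et al.) that StdDevStates presumably implements; the record is illustrative, not code from this change:

    record WelfordState(double mean, double m2, long count) {
        WelfordState combine(WelfordState other) {
            long n = count + other.count;
            if (n == 0) {
                return this; // both sides empty
            }
            double delta = other.mean - mean;
            double newMean = mean + delta * other.count / n;
            // m2 accumulates the sum of squared deviations from the mean
            double newM2 = m2 + other.m2 + delta * delta * ((double) count * other.count) / n;
            return new WelfordState(newMean, newM2, n);
        }
    }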
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link StdDevDoubleAggregator}. + * This class is generated. Do not edit it. + */ +public final class StdDevDoubleGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("mean", ElementType.DOUBLE), + new IntermediateStateDesc("m2", ElementType.DOUBLE), + new IntermediateStateDesc("count", ElementType.LONG) ); + + private final StdDevStates.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public StdDevDoubleGroupingAggregatorFunction(List channels, + StdDevStates.GroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static StdDevDoubleGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new StdDevDoubleGroupingAggregatorFunction(channels, StdDevDoubleAggregator.initGrouping(driverContext.bigArrays()), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + DoubleBlock valuesBlock = page.getBlock(channels.get(0)); + DoubleVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void close() { + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void close() { + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, DoubleBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < 
valuesEnd; v++) { + StdDevDoubleAggregator.combine(state, groupId, values.getDouble(v)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, DoubleVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + StdDevDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + StdDevDoubleAggregator.combine(state, groupId, values.getDouble(v)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, DoubleVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + StdDevDoubleAggregator.combine(state, groupId, values.getDouble(groupPosition + positionOffset)); + } + } + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block meanUncast = page.getBlock(channels.get(0)); + if (meanUncast.areAllValuesNull()) { + return; + } + DoubleVector mean = ((DoubleBlock) meanUncast).asVector(); + Block m2Uncast = page.getBlock(channels.get(1)); + if (m2Uncast.areAllValuesNull()) { + return; + } + DoubleVector m2 = ((DoubleBlock) m2Uncast).asVector(); + Block countUncast = page.getBlock(channels.get(2)); + if (countUncast.areAllValuesNull()) { + return; + } + LongVector count = ((LongBlock) countUncast).asVector(); + assert mean.getPositionCount() == m2.getPositionCount() && mean.getPositionCount() == count.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + StdDevDoubleAggregator.combineIntermediate(state, groupId, mean.getDouble(groupPosition + positionOffset), m2.getDouble(groupPosition + positionOffset), count.getLong(groupPosition + positionOffset)); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + StdDevStates.GroupingState inState = ((StdDevDoubleGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + 
StdDevDoubleAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = StdDevDoubleAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevFloatAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevFloatAggregatorFunction.java new file mode 100644 index 0000000000000..bf8c4854f6b93 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevFloatAggregatorFunction.java @@ -0,0 +1,180 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link StdDevFloatAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class StdDevFloatAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("mean", ElementType.DOUBLE), + new IntermediateStateDesc("m2", ElementType.DOUBLE), + new IntermediateStateDesc("count", ElementType.LONG) ); + + private final DriverContext driverContext; + + private final StdDevStates.SingleState state; + + private final List channels; + + public StdDevFloatAggregatorFunction(DriverContext driverContext, List channels, + StdDevStates.SingleState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static StdDevFloatAggregatorFunction create(DriverContext driverContext, + List channels) { + return new StdDevFloatAggregatorFunction(driverContext, channels, StdDevFloatAggregator.initSingle()); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page, BooleanVector mask) { + if (mask.allFalse()) { + // Entire page masked away + return; + } + if (mask.allTrue()) { + // No masking + FloatBlock block = page.getBlock(channels.get(0)); + FloatVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + return; + } + // Some positions masked away, others kept + FloatBlock block = page.getBlock(channels.get(0)); + FloatVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector, mask); + } else { + addRawBlock(block, mask); + } + } + + private void addRawVector(FloatVector vector) { + for (int i = 0; i < vector.getPositionCount(); i++) { + StdDevFloatAggregator.combine(state, vector.getFloat(i)); + } + } + + private void addRawVector(FloatVector vector, BooleanVector mask) { + for (int i = 0; i < vector.getPositionCount(); i++) { + if (mask.getBoolean(i) == false) { + continue; + } + StdDevFloatAggregator.combine(state, vector.getFloat(i)); + } + } + + private void addRawBlock(FloatBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + StdDevFloatAggregator.combine(state, block.getFloat(i)); + } + } + } + + private void addRawBlock(FloatBlock block, BooleanVector mask) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (mask.getBoolean(p) == false) { + continue; + } + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + StdDevFloatAggregator.combine(state, block.getFloat(i)); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block meanUncast = page.getBlock(channels.get(0)); + if (meanUncast.areAllValuesNull()) { + return; + } + DoubleVector mean = ((DoubleBlock) meanUncast).asVector(); + assert mean.getPositionCount() == 1; + Block m2Uncast = page.getBlock(channels.get(1)); + if (m2Uncast.areAllValuesNull()) { + return; + } + DoubleVector m2 = ((DoubleBlock) m2Uncast).asVector(); + assert m2.getPositionCount() == 1; + Block countUncast = page.getBlock(channels.get(2)); + if (countUncast.areAllValuesNull()) { + 
return; + } + LongVector count = ((LongBlock) countUncast).asVector(); + assert count.getPositionCount() == 1; + StdDevFloatAggregator.combineIntermediate(state, mean.getDouble(0), m2.getDouble(0), count.getLong(0)); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = StdDevFloatAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevFloatAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevFloatAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..25dfa54895eda --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevFloatAggregatorFunctionSupplier.java @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link StdDevFloatAggregator}. + * This class is generated. Do not edit it. + */ +public final class StdDevFloatAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + public StdDevFloatAggregatorFunctionSupplier(List channels) { + this.channels = channels; + } + + @Override + public StdDevFloatAggregatorFunction aggregator(DriverContext driverContext) { + return StdDevFloatAggregatorFunction.create(driverContext, channels); + } + + @Override + public StdDevFloatGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return StdDevFloatGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "std_dev of floats"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevFloatGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..bf994aaf2840e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevFloatGroupingAggregatorFunction.java @@ -0,0 +1,225 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link StdDevFloatAggregator}. + * This class is generated. Do not edit it. + */ +public final class StdDevFloatGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("mean", ElementType.DOUBLE), + new IntermediateStateDesc("m2", ElementType.DOUBLE), + new IntermediateStateDesc("count", ElementType.LONG) ); + + private final StdDevStates.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public StdDevFloatGroupingAggregatorFunction(List channels, + StdDevStates.GroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static StdDevFloatGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new StdDevFloatGroupingAggregatorFunction(channels, StdDevFloatAggregator.initGrouping(driverContext.bigArrays()), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + FloatBlock valuesBlock = page.getBlock(channels.get(0)); + FloatVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void close() { + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void close() { + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, FloatBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart 
+ values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + StdDevFloatAggregator.combine(state, groupId, values.getFloat(v)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, FloatVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + StdDevFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + StdDevFloatAggregator.combine(state, groupId, values.getFloat(v)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, FloatVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + StdDevFloatAggregator.combine(state, groupId, values.getFloat(groupPosition + positionOffset)); + } + } + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block meanUncast = page.getBlock(channels.get(0)); + if (meanUncast.areAllValuesNull()) { + return; + } + DoubleVector mean = ((DoubleBlock) meanUncast).asVector(); + Block m2Uncast = page.getBlock(channels.get(1)); + if (m2Uncast.areAllValuesNull()) { + return; + } + DoubleVector m2 = ((DoubleBlock) m2Uncast).asVector(); + Block countUncast = page.getBlock(channels.get(2)); + if (countUncast.areAllValuesNull()) { + return; + } + LongVector count = ((LongBlock) countUncast).asVector(); + assert mean.getPositionCount() == m2.getPositionCount() && mean.getPositionCount() == count.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + StdDevFloatAggregator.combineIntermediate(state, groupId, mean.getDouble(groupPosition + positionOffset), m2.getDouble(groupPosition + positionOffset), count.getLong(groupPosition + positionOffset)); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + StdDevStates.GroupingState inState = ((StdDevFloatGroupingAggregatorFunction) input).state; + 
state.enableGroupIdTracking(new SeenGroupIds.Empty()); + StdDevFloatAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = StdDevFloatAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevIntAggregatorFunction.java new file mode 100644 index 0000000000000..4a5585a7dd454 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevIntAggregatorFunction.java @@ -0,0 +1,180 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link StdDevIntAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class StdDevIntAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("mean", ElementType.DOUBLE), + new IntermediateStateDesc("m2", ElementType.DOUBLE), + new IntermediateStateDesc("count", ElementType.LONG) ); + + private final DriverContext driverContext; + + private final StdDevStates.SingleState state; + + private final List channels; + + public StdDevIntAggregatorFunction(DriverContext driverContext, List channels, + StdDevStates.SingleState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static StdDevIntAggregatorFunction create(DriverContext driverContext, + List channels) { + return new StdDevIntAggregatorFunction(driverContext, channels, StdDevIntAggregator.initSingle()); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page, BooleanVector mask) { + if (mask.allFalse()) { + // Entire page masked away + return; + } + if (mask.allTrue()) { + // No masking + IntBlock block = page.getBlock(channels.get(0)); + IntVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + return; + } + // Some positions masked away, others kept + IntBlock block = page.getBlock(channels.get(0)); + IntVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector, mask); + } else { + addRawBlock(block, mask); + } + } + + private void addRawVector(IntVector vector) { + for (int i = 0; i < vector.getPositionCount(); i++) { + StdDevIntAggregator.combine(state, vector.getInt(i)); + } + } + + private void addRawVector(IntVector vector, BooleanVector mask) { + for (int i = 0; i < vector.getPositionCount(); i++) { + if (mask.getBoolean(i) == false) { + continue; + } + StdDevIntAggregator.combine(state, vector.getInt(i)); + } + } + + private void addRawBlock(IntBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + StdDevIntAggregator.combine(state, block.getInt(i)); + } + } + } + + private void addRawBlock(IntBlock block, BooleanVector mask) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (mask.getBoolean(p) == false) { + continue; + } + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + StdDevIntAggregator.combine(state, block.getInt(i)); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block meanUncast = page.getBlock(channels.get(0)); + if (meanUncast.areAllValuesNull()) { + return; + } + DoubleVector mean = ((DoubleBlock) meanUncast).asVector(); + assert mean.getPositionCount() == 1; + Block m2Uncast = page.getBlock(channels.get(1)); + if (m2Uncast.areAllValuesNull()) { + return; + } + DoubleVector m2 = ((DoubleBlock) m2Uncast).asVector(); + assert m2.getPositionCount() == 1; + Block countUncast = page.getBlock(channels.get(2)); + if (countUncast.areAllValuesNull()) { + return; + } + LongVector count = 
((LongBlock) countUncast).asVector(); + assert count.getPositionCount() == 1; + StdDevIntAggregator.combineIntermediate(state, mean.getDouble(0), m2.getDouble(0), count.getLong(0)); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = StdDevIntAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevIntAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevIntAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..5a762d6606a25 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevIntAggregatorFunctionSupplier.java @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link StdDevIntAggregator}. + * This class is generated. Do not edit it. + */ +public final class StdDevIntAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + public StdDevIntAggregatorFunctionSupplier(List channels) { + this.channels = channels; + } + + @Override + public StdDevIntAggregatorFunction aggregator(DriverContext driverContext) { + return StdDevIntAggregatorFunction.create(driverContext, channels); + } + + @Override + public StdDevIntGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return StdDevIntGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "std_dev of ints"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevIntGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..139cc24d3541f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevIntGroupingAggregatorFunction.java @@ -0,0 +1,223 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link StdDevIntAggregator}. + * This class is generated. Do not edit it. + */ +public final class StdDevIntGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("mean", ElementType.DOUBLE), + new IntermediateStateDesc("m2", ElementType.DOUBLE), + new IntermediateStateDesc("count", ElementType.LONG) ); + + private final StdDevStates.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public StdDevIntGroupingAggregatorFunction(List channels, + StdDevStates.GroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static StdDevIntGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new StdDevIntGroupingAggregatorFunction(channels, StdDevIntAggregator.initGrouping(driverContext.bigArrays()), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + IntBlock valuesBlock = page.getBlock(channels.get(0)); + IntVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void close() { + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void close() { + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, IntBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + 
StdDevIntAggregator.combine(state, groupId, values.getInt(v)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, IntVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + StdDevIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + StdDevIntAggregator.combine(state, groupId, values.getInt(v)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, IntVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + StdDevIntAggregator.combine(state, groupId, values.getInt(groupPosition + positionOffset)); + } + } + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block meanUncast = page.getBlock(channels.get(0)); + if (meanUncast.areAllValuesNull()) { + return; + } + DoubleVector mean = ((DoubleBlock) meanUncast).asVector(); + Block m2Uncast = page.getBlock(channels.get(1)); + if (m2Uncast.areAllValuesNull()) { + return; + } + DoubleVector m2 = ((DoubleBlock) m2Uncast).asVector(); + Block countUncast = page.getBlock(channels.get(2)); + if (countUncast.areAllValuesNull()) { + return; + } + LongVector count = ((LongBlock) countUncast).asVector(); + assert mean.getPositionCount() == m2.getPositionCount() && mean.getPositionCount() == count.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + StdDevIntAggregator.combineIntermediate(state, groupId, mean.getDouble(groupPosition + positionOffset), m2.getDouble(groupPosition + positionOffset), count.getLong(groupPosition + positionOffset)); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + StdDevStates.GroupingState inState = ((StdDevIntGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + StdDevIntAggregator.combineStates(state, groupId, inState, position); + } + + @Override + 
public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = StdDevIntAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevLongAggregatorFunction.java new file mode 100644 index 0000000000000..b5ed31116a90c --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevLongAggregatorFunction.java @@ -0,0 +1,178 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link StdDevLongAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class StdDevLongAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("mean", ElementType.DOUBLE), + new IntermediateStateDesc("m2", ElementType.DOUBLE), + new IntermediateStateDesc("count", ElementType.LONG) ); + + private final DriverContext driverContext; + + private final StdDevStates.SingleState state; + + private final List channels; + + public StdDevLongAggregatorFunction(DriverContext driverContext, List channels, + StdDevStates.SingleState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static StdDevLongAggregatorFunction create(DriverContext driverContext, + List channels) { + return new StdDevLongAggregatorFunction(driverContext, channels, StdDevLongAggregator.initSingle()); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page, BooleanVector mask) { + if (mask.allFalse()) { + // Entire page masked away + return; + } + if (mask.allTrue()) { + // No masking + LongBlock block = page.getBlock(channels.get(0)); + LongVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + return; + } + // Some positions masked away, others kept + LongBlock block = page.getBlock(channels.get(0)); + LongVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector, mask); + } else { + addRawBlock(block, mask); + } + } + + private void addRawVector(LongVector vector) { + for (int i = 0; i < vector.getPositionCount(); i++) { + StdDevLongAggregator.combine(state, vector.getLong(i)); + } + } + + private void addRawVector(LongVector vector, BooleanVector mask) { + for (int i = 0; i < vector.getPositionCount(); i++) { + if (mask.getBoolean(i) == false) { + continue; + } + StdDevLongAggregator.combine(state, vector.getLong(i)); + } + } + + private void addRawBlock(LongBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + StdDevLongAggregator.combine(state, block.getLong(i)); + } + } + } + + private void addRawBlock(LongBlock block, BooleanVector mask) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (mask.getBoolean(p) == false) { + continue; + } + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + for (int i = start; i < end; i++) { + StdDevLongAggregator.combine(state, block.getLong(i)); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block meanUncast = page.getBlock(channels.get(0)); + if (meanUncast.areAllValuesNull()) { + return; + } + DoubleVector mean = ((DoubleBlock) meanUncast).asVector(); + assert mean.getPositionCount() == 1; + Block m2Uncast = page.getBlock(channels.get(1)); + if (m2Uncast.areAllValuesNull()) { + return; + } + DoubleVector m2 = ((DoubleBlock) m2Uncast).asVector(); + assert m2.getPositionCount() == 1; + Block countUncast = page.getBlock(channels.get(2)); + if (countUncast.areAllValuesNull()) { + return; + } + 
LongVector count = ((LongBlock) countUncast).asVector(); + assert count.getPositionCount() == 1; + StdDevLongAggregator.combineIntermediate(state, mean.getDouble(0), m2.getDouble(0), count.getLong(0)); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = StdDevLongAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevLongAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevLongAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..09b996201ef16 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevLongAggregatorFunctionSupplier.java @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link StdDevLongAggregator}. + * This class is generated. Do not edit it. + */ +public final class StdDevLongAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + public StdDevLongAggregatorFunctionSupplier(List channels) { + this.channels = channels; + } + + @Override + public StdDevLongAggregatorFunction aggregator(DriverContext driverContext) { + return StdDevLongAggregatorFunction.create(driverContext, channels); + } + + @Override + public StdDevLongGroupingAggregatorFunction groupingAggregator(DriverContext driverContext) { + return StdDevLongGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "std_dev of longs"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevLongGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..da7a5f4bdea0d --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/StdDevLongGroupingAggregatorFunction.java @@ -0,0 +1,223 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link StdDevLongAggregator}. + * This class is generated. Do not edit it. + */ +public final class StdDevLongGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("mean", ElementType.DOUBLE), + new IntermediateStateDesc("m2", ElementType.DOUBLE), + new IntermediateStateDesc("count", ElementType.LONG) ); + + private final StdDevStates.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public StdDevLongGroupingAggregatorFunction(List channels, + StdDevStates.GroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static StdDevLongGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new StdDevLongGroupingAggregatorFunction(channels, StdDevLongAggregator.initGrouping(driverContext.bigArrays()), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + LongBlock valuesBlock = page.getBlock(channels.get(0)); + LongVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void close() { + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void close() { + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, LongBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + 
StdDevLongAggregator.combine(state, groupId, values.getLong(v)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, LongVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + StdDevLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); + } + } + + private void addRawInput(int positionOffset, IntBlock groups, LongBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + StdDevLongAggregator.combine(state, groupId, values.getLong(v)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, LongVector values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + StdDevLongAggregator.combine(state, groupId, values.getLong(groupPosition + positionOffset)); + } + } + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block meanUncast = page.getBlock(channels.get(0)); + if (meanUncast.areAllValuesNull()) { + return; + } + DoubleVector mean = ((DoubleBlock) meanUncast).asVector(); + Block m2Uncast = page.getBlock(channels.get(1)); + if (m2Uncast.areAllValuesNull()) { + return; + } + DoubleVector m2 = ((DoubleBlock) m2Uncast).asVector(); + Block countUncast = page.getBlock(channels.get(2)); + if (countUncast.areAllValuesNull()) { + return; + } + LongVector count = ((LongBlock) countUncast).asVector(); + assert mean.getPositionCount() == m2.getPositionCount() && mean.getPositionCount() == count.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + StdDevLongAggregator.combineIntermediate(state, groupId, mean.getDouble(groupPosition + positionOffset), m2.getDouble(groupPosition + positionOffset), count.getLong(groupPosition + positionOffset)); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + StdDevStates.GroupingState inState = ((StdDevLongGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + StdDevLongAggregator.combineStates(state, groupId, inState, position); + } + + 
@Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = StdDevLongAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/module-info.java b/x-pack/plugin/esql/compute/src/main/java/module-info.java index 573d9e048a4d4..1b3253694b298 100644 --- a/x-pack/plugin/esql/compute/src/main/java/module-info.java +++ b/x-pack/plugin/esql/compute/src/main/java/module-info.java @@ -19,6 +19,7 @@ requires org.elasticsearch.ml; requires org.elasticsearch.tdigest; requires org.elasticsearch.geo; + requires org.elasticsearch.xcore; requires hppc; exports org.elasticsearch.compute; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/StdDevStates.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/StdDevStates.java new file mode 100644 index 0000000000000..bff8903fd3bec --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/StdDevStates.java @@ -0,0 +1,211 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.compute.aggregation;
+
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.ObjectArray;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.compute.data.DoubleBlock;
+import org.elasticsearch.compute.data.IntVector;
+import org.elasticsearch.compute.operator.DriverContext;
+import org.elasticsearch.core.Releasables;
+
+public final class StdDevStates {
+
+    private StdDevStates() {}
+
+    static final class SingleState implements AggregatorState {
+
+        private final WelfordAlgorithm welfordAlgorithm;
+
+        SingleState() {
+            this(0, 0, 0);
+        }
+
+        SingleState(double mean, double m2, long count) {
+            this.welfordAlgorithm = new WelfordAlgorithm(mean, m2, count);
+        }
+
+        public void add(long value) {
+            welfordAlgorithm.add(value);
+        }
+
+        public void add(double value) {
+            welfordAlgorithm.add(value);
+        }
+
+        public void add(int value) {
+            welfordAlgorithm.add(value);
+        }
+
+        public void combine(double mean, double m2, long count) {
+            welfordAlgorithm.add(mean, m2, count);
+        }
+
+        @Override
+        public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) {
+            assert blocks.length >= offset + 3;
+            BlockFactory blockFactory = driverContext.blockFactory();
+            blocks[offset + 0] = blockFactory.newConstantDoubleBlockWith(mean(), 1);
+            blocks[offset + 1] = blockFactory.newConstantDoubleBlockWith(m2(), 1);
+            blocks[offset + 2] = blockFactory.newConstantLongBlockWith(count(), 1);
+        }
+
+        @Override
+        public void close() {}
+
+        public double mean() {
+            return welfordAlgorithm.mean();
+        }
+
+        public double m2() {
+            return welfordAlgorithm.m2();
+        }
+
+        public long count() {
+            return welfordAlgorithm.count();
+        }
+
+        public double evaluateFinal() {
+            return welfordAlgorithm.evaluate();
+        }
+
+        public Block evaluateFinal(DriverContext driverContext) {
+            final long count = count();
+            final double m2 = m2();
+            if (count == 0 || Double.isFinite(m2) == false) {
+                return driverContext.blockFactory().newConstantNullBlock(1);
+            }
+            return driverContext.blockFactory().newConstantDoubleBlockWith(evaluateFinal(), 1);
+        }
+    }
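Editor's note: every `(mean, m2, count)` triple serialized above follows Welford's accumulation, and `SingleState.combine` delegates to the parallel merge that `WelfordAlgorithm` (later in this diff) implements. For reference, the formulas the code computes, with the final result being the population standard deviation:

```latex
% Welford's online update, applied by add(double value) for each new x_n:
\bar{x}_n = \bar{x}_{n-1} + \frac{x_n - \bar{x}_{n-1}}{n},
\qquad
M_{2,n} = M_{2,n-1} + (x_n - \bar{x}_{n-1})(x_n - \bar{x}_n)

% Merge of two partial states A and B (Chan et al.), applied when
% combining intermediate results:
\delta = \bar{x}_A - \bar{x}_B,
\qquad
M_{2,A \cup B} = M_{2,A} + M_{2,B} + \delta^2 \frac{n_A n_B}{n_A + n_B},
\qquad
\bar{x}_{A \cup B} = \frac{n_A \bar{x}_A + n_B \bar{x}_B}{n_A + n_B}

% Final result returned by evaluate() (0 when n < 2, as in the code):
\sigma = \sqrt{M_2 / n}
```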
groupId, int value) { + var state = getOrSet(groupId); + state.add(value); + } + + private void ensureCapacity(int groupId) { + states = bigArrays.grow(states, groupId + 1); + } + + @Override + public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + assert blocks.length >= offset + 3 : "blocks=" + blocks.length + ",offset=" + offset; + try ( + var meanBuilder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount()); + var m2Builder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount()); + var countBuilder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount()); + ) { + for (int i = 0; i < selected.getPositionCount(); i++) { + final var groupId = selected.getInt(i); + final var state = groupId < states.size() ? states.get(groupId) : null; + if (state != null) { + meanBuilder.appendDouble(state.mean()); + m2Builder.appendDouble(state.m2()); + countBuilder.appendLong(state.count()); + } else { + meanBuilder.appendDouble(0.0); + m2Builder.appendDouble(0.0); + countBuilder.appendLong(0); + } + } + blocks[offset + 0] = meanBuilder.build(); + blocks[offset + 1] = m2Builder.build(); + blocks[offset + 2] = countBuilder.build(); + } + } + + public Block evaluateFinal(IntVector selected, DriverContext driverContext) { + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + final var groupId = selected.getInt(i); + final var st = getOrNull(groupId); + if (st != null) { + final var m2 = st.m2(); + final var count = st.count(); + if (count == 0 || Double.isFinite(m2) == false) { + builder.appendNull(); + } else { + builder.appendDouble(st.evaluate()); + } + } else { + builder.appendNull(); + } + } + return builder.build(); + } + } + + @Override + public void close() { + Releasables.close(states); + } + + void enableGroupIdTracking(SeenGroupIds seenGroupIds) { + // noop - we handle the null states inside `toIntermediate` and `evaluateFinal` + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/WelfordAlgorithm.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/WelfordAlgorithm.java new file mode 100644 index 0000000000000..8ccb985507247 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/WelfordAlgorithm.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +/** + * Algorithm for calculating standard deviation, one value at a time. 
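+ * <p>
+ * A rough sketch of the single-value update implemented below:
+ * <pre>{@code
+ * delta  = value - mean;
+ * count += 1;
+ * mean  += delta / count;
+ * m2    += delta * (value - mean); // running sum of squared distances from the mean
+ * }</pre>
+ * Two partial states merge with a count-weighted mean and
+ * {@code m2 = m2a + m2b + delta * delta * counta * countb / (counta + countb)} where {@code delta = meana - meanb}.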
+ * + * @see <a href="https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm">
+ * Welford's online algorithm</a> and
+ * <a href="https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm">Parallel algorithm</a>
+ */ +public final class WelfordAlgorithm { + private double mean; + private double m2; + private long count; + + public double mean() { + return mean; + } + + public double m2() { + return m2; + } + + public long count() { + return count; + } + + public WelfordAlgorithm() { + this(0, 0, 0); + } + + public WelfordAlgorithm(double mean, double m2, long count) { + this.mean = mean; + this.m2 = m2; + this.count = count; + } + + public void add(int value) { + add((double) value); + } + + public void add(long value) { + add((double) value); + } + + public void add(double value) { + final double delta = value - mean; + count += 1; + mean += delta / count; + m2 += delta * (value - mean); + } + + public void add(double meanValue, double m2Value, long countValue) { + if (countValue == 0) { + return; + } + if (count == 0) { + mean = meanValue; + m2 = m2Value; + count = countValue; + return; + } + double delta = mean - meanValue; + m2 += m2Value + delta * delta * count * countValue / (count + countValue); + mean = (mean * count + meanValue * countValue) / (count + countValue); + count += countValue; + } + + public double evaluate() { + return count < 2 ? 0 : Math.sqrt(m2 / count); + } +}
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-StdDevAggregator.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-StdDevAggregator.java.st new file mode 100644 index 0000000000000..510d770f90d62 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-StdDevAggregator.java.st @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * A standard deviation aggregation definition for $type$. + * This class is generated. Edit `X-StdDevAggregator.java.st` instead. 
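+ * <p>
+ * The intermediate state is the Welford triple declared below as {@code (mean, m2, count)};
+ * {@link WelfordAlgorithm} owns the logic for merging two such triples.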
+ */ +@Aggregator( + { + @IntermediateState(name = "mean", type = "DOUBLE"), + @IntermediateState(name = "m2", type = "DOUBLE"), + @IntermediateState(name = "count", type = "LONG") } +) +@GroupingAggregator +public class StdDev$Type$Aggregator { + + public static StdDevStates.SingleState initSingle() { + return new StdDevStates.SingleState(); + } + + public static void combine(StdDevStates.SingleState state, $type$ value) { + state.add(value); + } + + public static void combineIntermediate(StdDevStates.SingleState state, double mean, double m2, long count) { + state.combine(mean, m2, count); + } + + public static Block evaluateFinal(StdDevStates.SingleState state, DriverContext driverContext) { + return state.evaluateFinal(driverContext); + } + + public static StdDevStates.GroupingState initGrouping(BigArrays bigArrays) { + return new StdDevStates.GroupingState(bigArrays); + } + + public static void combine(StdDevStates.GroupingState current, int groupId, $type$ value) { + current.add(groupId, value); + } + + public static void combineStates(StdDevStates.GroupingState current, int groupId, StdDevStates.GroupingState state, int statePosition) { + current.combine(groupId, state.getOrNull(statePosition)); + } + + public static void combineIntermediate(StdDevStates.GroupingState state, int groupId, double mean, double m2, long count) { + state.combine(groupId, mean, m2, count); + } + + public static Block evaluateFinal(StdDevStates.GroupingState state, IntVector selected, DriverContext driverContext) { + return state.evaluateFinal(selected, driverContext); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BlockHash.java index 919cb92f79260..30afa7ae3128d 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BlockHash.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.util.Int3Hash; import org.elasticsearch.common.util.LongHash; import org.elasticsearch.common.util.LongLongHash; +import org.elasticsearch.compute.aggregation.AggregatorMode; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; import org.elasticsearch.compute.aggregation.SeenGroupIds; import org.elasticsearch.compute.data.Block; @@ -24,6 +25,7 @@ import org.elasticsearch.compute.data.Page; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.index.analysis.AnalysisRegistry; import java.util.Iterator; import java.util.List; @@ -58,9 +60,7 @@ * leave a big gap, even if we never see {@code null}. *

*/ -public abstract sealed class BlockHash implements Releasable, SeenGroupIds // - permits BooleanBlockHash, BytesRefBlockHash, DoubleBlockHash, IntBlockHash, LongBlockHash, BytesRef2BlockHash, BytesRef3BlockHash, // - NullBlockHash, PackedValuesBlockHash, BytesRefLongBlockHash, LongLongBlockHash, TimeSeriesBlockHash { +public abstract class BlockHash implements Releasable, SeenGroupIds { protected final BlockFactory blockFactory;
@@ -107,7 +107,15 @@ public abstract sealed class BlockHash implements Releasable, SeenGroupIds // @Override public abstract BitArray seenGroupIds(BigArrays bigArrays); - public record GroupSpec(int channel, ElementType elementType) {} + /** + * @param isCategorize Whether this group is a CATEGORIZE() or not. + * May be changed in the future when more stateful grouping functions are added. + */ + public record GroupSpec(int channel, ElementType elementType, boolean isCategorize) { + public GroupSpec(int channel, ElementType elementType) { + this(channel, elementType, false); + } + } /** * Creates a specialized hash table that maps one or more {@link Block}s to ids.
@@ -159,6 +167,22 @@ public static BlockHash buildPackedValuesBlockHash(List<GroupSpec> groups, BlockFactory return new PackedValuesBlockHash(groups, blockFactory, emitBatchSize); } + /** + * Builds a BlockHash for the Categorize grouping function. + */ + public static BlockHash buildCategorizeBlockHash( + List<GroupSpec> groups, + AggregatorMode aggregatorMode, + BlockFactory blockFactory, + AnalysisRegistry analysisRegistry + ) { + if (groups.size() != 1) { + throw new IllegalArgumentException("only a single CATEGORIZE group can be used"); + } + + return new CategorizeBlockHash(blockFactory, groups.get(0).channel, aggregatorMode, analysisRegistry); + } + /** * Creates a specialized hash table that maps a {@link Block} of the given input element type to ids. */
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHash.java new file mode 100644 index 0000000000000..35c6faf84e623 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHash.java @@ -0,0 +1,309 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation.blockhash; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.common.util.BytesRefHash; +import org.elasticsearch.compute.aggregation.AggregatorMode; +import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; +import org.elasticsearch.compute.aggregation.SeenGroupIds; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.xpack.core.ml.job.config.CategorizationAnalyzerConfig; +import org.elasticsearch.xpack.ml.aggs.categorization.CategorizationBytesRefHash; +import org.elasticsearch.xpack.ml.aggs.categorization.CategorizationPartOfSpeechDictionary; +import org.elasticsearch.xpack.ml.aggs.categorization.SerializableTokenListCategory; +import org.elasticsearch.xpack.ml.aggs.categorization.TokenListCategorizer; +import org.elasticsearch.xpack.ml.job.categorization.CategorizationAnalyzer; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Base BlockHash implementation for {@code Categorize} grouping function. + */ +public class CategorizeBlockHash extends BlockHash { + + private static final CategorizationAnalyzerConfig ANALYZER_CONFIG = CategorizationAnalyzerConfig.buildStandardCategorizationAnalyzer( + List.of() + ); + private static final int NULL_ORD = 0; + + // TODO: this should probably also take an emitBatchSize + private final int channel; + private final AggregatorMode aggregatorMode; + private final TokenListCategorizer.CloseableTokenListCategorizer categorizer; + + private final CategorizeEvaluator evaluator; + + /** + * Store whether we've seen any {@code null} values. + *

+ * Null gets the {@link #NULL_ORD} ord. + *

+ */ + private boolean seenNull = false; + + CategorizeBlockHash(BlockFactory blockFactory, int channel, AggregatorMode aggregatorMode, AnalysisRegistry analysisRegistry) { + super(blockFactory); + + this.channel = channel; + this.aggregatorMode = aggregatorMode; + + this.categorizer = new TokenListCategorizer.CloseableTokenListCategorizer( + new CategorizationBytesRefHash(new BytesRefHash(2048, blockFactory.bigArrays())), + CategorizationPartOfSpeechDictionary.getInstance(), + 0.70f + ); + + if (aggregatorMode.isInputPartial() == false) { + CategorizationAnalyzer analyzer; + try { + Objects.requireNonNull(analysisRegistry); + analyzer = new CategorizationAnalyzer(analysisRegistry, ANALYZER_CONFIG); + } catch (Exception e) { + categorizer.close(); + throw new RuntimeException(e); + } + this.evaluator = new CategorizeEvaluator(analyzer); + } else { + this.evaluator = null; + } + }
+ + @Override + public void add(Page page, GroupingAggregatorFunction.AddInput addInput) { + if (aggregatorMode.isInputPartial() == false) { + addInitial(page, addInput); + } else { + addIntermediate(page, addInput); + } + } + + @Override + public Block[] getKeys() { + return new Block[] { aggregatorMode.isOutputPartial() ? buildIntermediateBlock() : buildFinalBlock() }; + } + + @Override + public IntVector nonEmpty() { + return IntVector.range(seenNull ? 0 : 1, categorizer.getCategoryCount() + 1, blockFactory); + } + + @Override + public BitArray seenGroupIds(BigArrays bigArrays) { + return new SeenGroupIds.Range(seenNull ? 0 : 1, Math.toIntExact(categorizer.getCategoryCount() + 1)).seenGroupIds(bigArrays); + } + + @Override + public final ReleasableIterator<IntBlock> lookup(Page page, ByteSizeValue targetBlockSize) { + throw new UnsupportedOperationException(); + } + + @Override + public void close() { + Releasables.close(evaluator, categorizer); + }
+ + /** + * Adds initial (raw) input to the state. + */ + private void addInitial(Page page, GroupingAggregatorFunction.AddInput addInput) { + try (IntBlock result = (IntBlock) evaluator.eval(page.getBlock(channel))) { + addInput.add(0, result); + } + } + + /** + * Adds intermediate state to the state. + */ + private void addIntermediate(Page page, GroupingAggregatorFunction.AddInput addInput) { + if (page.getPositionCount() == 0) { + return; + } + BytesRefBlock categorizerState = page.getBlock(channel); + if (categorizerState.areAllValuesNull()) { + seenNull = true; + try (var newIds = blockFactory.newConstantIntVector(NULL_ORD, 1)) { + addInput.add(0, newIds); + } + return; + } + + Map<Integer, Integer> idMap = readIntermediate(categorizerState.getBytesRef(0, new BytesRef())); + try (IntBlock.Builder newIdsBuilder = blockFactory.newIntBlockBuilder(idMap.size())) { + int fromId = idMap.containsKey(0) ? 0 : 1; + int toId = fromId + idMap.size(); + for (int i = fromId; i < toId; i++) { + newIdsBuilder.appendInt(idMap.get(i)); + } + try (IntBlock newIds = newIdsBuilder.build()) { + addInput.add(0, newIds); + } + } + }
+ + /** + * Read intermediate state from a block. + * + * @return a map from the old category id to the new one. The old ids go from 0 to {@code size - 1}. 
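+ * Ids in the returned map are shifted by one relative to the serialized categories because ord {@code 0} is reserved for null.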
+ */ + private Map<Integer, Integer> readIntermediate(BytesRef bytes) { + Map<Integer, Integer> idMap = new HashMap<>(); + try (StreamInput in = new BytesArray(bytes).streamInput()) { + if (in.readBoolean()) { + seenNull = true; + idMap.put(NULL_ORD, NULL_ORD); + } + int count = in.readVInt(); + for (int oldCategoryId = 0; oldCategoryId < count; oldCategoryId++) { + int newCategoryId = categorizer.mergeWireCategory(new SerializableTokenListCategory(in)).getId(); + // +1 because the 0 ordinal is reserved for null + idMap.put(oldCategoryId + 1, newCategoryId + 1); + } + return idMap; + } catch (IOException e) { + throw new RuntimeException(e); + } + }
+ + /** + * Serializes the intermediate state into a single BytesRef block, or an empty Null block if there are no categories. + */ + private Block buildIntermediateBlock() { + if (categorizer.getCategoryCount() == 0) { + return blockFactory.newConstantNullBlock(seenNull ? 1 : 0); + } + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeBoolean(seenNull); + out.writeVInt(categorizer.getCategoryCount()); + for (SerializableTokenListCategory category : categorizer.toCategoriesById()) { + category.writeTo(out); + } + // We're returning a block with N positions just because the Page must have all blocks with the same position count! + int positionCount = categorizer.getCategoryCount() + (seenNull ? 1 : 0); + return blockFactory.newConstantBytesRefBlockWith(out.bytes().toBytesRef(), positionCount); + } catch (IOException e) { + throw new RuntimeException(e); + } + }
+ + private Block buildFinalBlock() { + BytesRefBuilder scratch = new BytesRefBuilder(); + + if (seenNull) { + try (BytesRefBlock.Builder result = blockFactory.newBytesRefBlockBuilder(categorizer.getCategoryCount())) { + result.appendNull(); + for (SerializableTokenListCategory category : categorizer.toCategoriesById()) { + scratch.copyChars(category.getRegex()); + result.appendBytesRef(scratch.get()); + scratch.clear(); + } + return result.build(); + } + } + + try (BytesRefVector.Builder result = blockFactory.newBytesRefVectorBuilder(categorizer.getCategoryCount())) { + for (SerializableTokenListCategory category : categorizer.toCategoriesById()) { + scratch.copyChars(category.getRegex()); + result.appendBytesRef(scratch.get()); + scratch.clear(); + } + return result.build().asBlock(); + } + }
+ + /** + * Similar implementation to an Evaluator. 
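+ * <p>
+ * Maps each incoming {@code BytesRef} value to a category ord via {@code categorizer.computeCategory(...)},
+ * reserving ord {@code 0} ({@code NULL_ORD}) for null values.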
+ */ + private final class CategorizeEvaluator implements Releasable { + private final CategorizationAnalyzer analyzer; + + CategorizeEvaluator(CategorizationAnalyzer analyzer) { + this.analyzer = analyzer; + } + + Block eval(BytesRefBlock vBlock) { + BytesRefVector vVector = vBlock.asVector(); + if (vVector == null) { + return eval(vBlock.getPositionCount(), vBlock); + } + IntVector vector = eval(vBlock.getPositionCount(), vVector); + return vector.asBlock(); + } + + IntBlock eval(int positionCount, BytesRefBlock vBlock) { + try (IntBlock.Builder result = blockFactory.newIntBlockBuilder(positionCount)) { + BytesRef vScratch = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + if (vBlock.isNull(p)) { + seenNull = true; + result.appendInt(NULL_ORD); + continue; + } + int first = vBlock.getFirstValueIndex(p); + int count = vBlock.getValueCount(p); + if (count == 1) { + result.appendInt(process(vBlock.getBytesRef(first, vScratch))); + continue; + } + int end = first + count; + result.beginPositionEntry(); + for (int i = first; i < end; i++) { + result.appendInt(process(vBlock.getBytesRef(i, vScratch))); + } + result.endPositionEntry(); + } + return result.build(); + } + } + + IntVector eval(int positionCount, BytesRefVector vVector) { + try (IntVector.FixedBuilder result = blockFactory.newIntVectorFixedBuilder(positionCount)) { + BytesRef vScratch = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + result.appendInt(p, process(vVector.getBytesRef(p, vScratch))); + } + return result.build(); + } + } + + int process(BytesRef v) { + var category = categorizer.computeCategory(v.utf8ToString(), analyzer); + if (category == null) { + seenNull = true; + return NULL_ORD; + } + return category.getId() + 1; + } + + @Override + public void close() { + analyzer.close(); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java index 9e48bc13cdafa..1e06cf1ea4450 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java @@ -10,7 +10,6 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.NamedWriteable; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; @@ -18,8 +17,6 @@ import org.elasticsearch.core.Releasables; import org.elasticsearch.index.mapper.BlockLoader; -import java.util.List; - /** * A Block is a columnar representation of homogenous data. It has a position (row) count, and * various data retrieval methods for accessing the underlying data that is stored at a given @@ -291,19 +288,6 @@ static Block[] buildAll(Block.Builder... 
builders) { } } - static List<NamedWriteableRegistry.Entry> getNamedWriteables() { - return List.of( - IntBlock.ENTRY, - LongBlock.ENTRY, - FloatBlock.ENTRY, - DoubleBlock.ENTRY, - BytesRefBlock.ENTRY, - BooleanBlock.ENTRY, - ConstantNullBlock.ENTRY, - CompositeBlock.ENTRY - ); - } - /** * Serialization type for blocks: 0 and 1 replace false/true used in pre-8.14 */
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockWritables.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockWritables.java new file mode 100644 index 0000000000000..ff9139e57e52e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockWritables.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; + +import java.util.List; + +public class BlockWritables { + + public static List<NamedWriteableRegistry.Entry> getNamedWriteables() { + return List.of( + IntBlock.ENTRY, + LongBlock.ENTRY, + FloatBlock.ENTRY, + DoubleBlock.ENTRY, + BytesRefBlock.ENTRY, + BooleanBlock.ENTRY, + ConstantNullBlock.ENTRY, + CompositeBlock.ENTRY + ); + } +}
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/sort/BytesRefBucketedSort.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/sort/BytesRefBucketedSort.java index 9198de53b1e04..6dca94b9bc79a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/sort/BytesRefBucketedSort.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/sort/BytesRefBucketedSort.java @@ -147,7 +147,7 @@ public void merge(int bucket, BytesRefBucketedSort other, int otherBucket) { // The value was never collected. return; } - other.checkInvariant(bucket); + other.checkInvariant(otherBucket); long otherStart = other.startIndex(otherBucket, otherRootIndex); long otherEnd = other.common.endIndex(otherRootIndex); // TODO: This can be improved for heapified buckets by making use of the heap structures
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMaxFactory.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMaxFactory.java new file mode 100644 index 0000000000000..ba7de22b1b821 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMaxFactory.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.search.MultiValueMode; + +import java.io.IOException; +import java.util.List; +import java.util.function.Function; +
+/** + * Factory that generates an operator that finds the max value of a field using the {@link LuceneMinMaxOperator}. + */ +public final class LuceneMaxFactory extends LuceneOperator.Factory { + + public enum NumberType implements LuceneMinMaxOperator.NumberType { + INTEGER { + @Override + public Block buildResult(BlockFactory blockFactory, long result, int pageSize) { + return blockFactory.newConstantIntBlockWith(Math.toIntExact(result), pageSize); + } + + @Override + public Block buildEmptyResult(BlockFactory blockFactory, int pageSize) { + return blockFactory.newConstantIntBlockWith(Integer.MIN_VALUE, pageSize); + } + + @Override + long bytesToLong(byte[] bytes) { + return NumericUtils.sortableBytesToInt(bytes, 0); + } + }, + FLOAT { + @Override + public Block buildResult(BlockFactory blockFactory, long result, int pageSize) { + return blockFactory.newConstantFloatBlockWith(NumericUtils.sortableIntToFloat(Math.toIntExact(result)), pageSize); + } + + @Override + public Block buildEmptyResult(BlockFactory blockFactory, int pageSize) { + return blockFactory.newConstantFloatBlockWith(-Float.MAX_VALUE, pageSize); + } + + @Override + long bytesToLong(byte[] bytes) { + return NumericUtils.sortableBytesToInt(bytes, 0); + } + }, + LONG { + @Override + public Block buildResult(BlockFactory blockFactory, long result, int pageSize) { + return blockFactory.newConstantLongBlockWith(result, pageSize); + } + + @Override + public Block buildEmptyResult(BlockFactory blockFactory, int pageSize) { + return blockFactory.newConstantLongBlockWith(Long.MIN_VALUE, pageSize); + } + + @Override + long bytesToLong(byte[] bytes) { + return NumericUtils.sortableBytesToLong(bytes, 0); + } + }, + DOUBLE { + @Override + public Block buildResult(BlockFactory blockFactory, long result, int pageSize) { + return blockFactory.newConstantDoubleBlockWith(NumericUtils.sortableLongToDouble(result), pageSize); + } + + @Override + public Block buildEmptyResult(BlockFactory blockFactory, int pageSize) { + return blockFactory.newConstantDoubleBlockWith(-Double.MAX_VALUE, pageSize); + } + + @Override + long bytesToLong(byte[] bytes) { + return NumericUtils.sortableBytesToLong(bytes, 0); + } + };
+ + public final NumericDocValues multiValueMode(SortedNumericDocValues sortedNumericDocValues) { + return MultiValueMode.MAX.select(sortedNumericDocValues); + } + + public final long fromPointValues(PointValues pointValues) throws IOException { + return bytesToLong(pointValues.getMaxPackedValue()); + } + + public final long evaluate(long value1, long value2) { + return Math.max(value1, value2); + } + + abstract long bytesToLong(byte[] bytes); + } + + private final String fieldName; + private final NumberType numberType; + + public LuceneMaxFactory( + List<? extends ShardContext> contexts, + Function<ShardContext, Query> queryFunction, + DataPartitioning dataPartitioning, + int taskConcurrency, + String 
fieldName, + NumberType numberType, + int limit + ) { + super(contexts, queryFunction, dataPartitioning, taskConcurrency, limit, ScoreMode.COMPLETE_NO_SCORES); + this.fieldName = fieldName; + this.numberType = numberType; + } + + @Override + public SourceOperator get(DriverContext driverContext) { + return new LuceneMinMaxOperator(driverContext.blockFactory(), sliceQueue, fieldName, numberType, limit, Long.MIN_VALUE); + } + + @Override + public String describe() { + return "LuceneMaxOperator[type = " + + numberType.name() + + ", dataPartitioning = " + + dataPartitioning + + ", fieldName = " + + fieldName + + ", limit = " + + limit + + "]"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMinFactory.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMinFactory.java new file mode 100644 index 0000000000000..e3c6c8310373d --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMinFactory.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.search.MultiValueMode; + +import java.io.IOException; +import java.util.List; +import java.util.function.Function; + +/** + * Factory that generates an operator that finds the min value of a field using the {@link LuceneMinMaxOperator}. 
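+ * <p>
+ * The operator is seeded with the type's identity value for min (for example {@code Long.MAX_VALUE} for LONG),
+ * so the first value seen always wins the comparison.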
+ */ +public final class LuceneMinFactory extends LuceneOperator.Factory { + + public enum NumberType implements LuceneMinMaxOperator.NumberType { + INTEGER { + @Override + public Block buildResult(BlockFactory blockFactory, long result, int pageSize) { + return blockFactory.newConstantIntBlockWith(Math.toIntExact(result), pageSize); + } + + @Override + public Block buildEmptyResult(BlockFactory blockFactory, int pageSize) { + return blockFactory.newConstantIntBlockWith(Integer.MAX_VALUE, pageSize); + } + + @Override + long bytesToLong(byte[] bytes) { + return NumericUtils.sortableBytesToInt(bytes, 0); + } + }, + FLOAT { + @Override + public Block buildResult(BlockFactory blockFactory, long result, int pageSize) { + return blockFactory.newConstantFloatBlockWith(NumericUtils.sortableIntToFloat(Math.toIntExact(result)), pageSize); + } + + @Override + public Block buildEmptyResult(BlockFactory blockFactory, int pageSize) { + return blockFactory.newConstantFloatBlockWith(Float.POSITIVE_INFINITY, pageSize); + } + + @Override + long bytesToLong(byte[] bytes) { + return NumericUtils.sortableBytesToInt(bytes, 0); + } + }, + LONG { + @Override + public Block buildResult(BlockFactory blockFactory, long result, int pageSize) { + return blockFactory.newConstantLongBlockWith(result, pageSize); + } + + @Override + public Block buildEmptyResult(BlockFactory blockFactory, int pageSize) { + return blockFactory.newConstantLongBlockWith(Long.MAX_VALUE, pageSize); + } + + @Override + long bytesToLong(byte[] bytes) { + return NumericUtils.sortableBytesToLong(bytes, 0); + } + }, + DOUBLE { + @Override + public Block buildResult(BlockFactory blockFactory, long result, int pageSize) { + return blockFactory.newConstantDoubleBlockWith(NumericUtils.sortableLongToDouble(result), pageSize); + } + + @Override + public Block buildEmptyResult(BlockFactory blockFactory, int pageSize) { + return blockFactory.newConstantDoubleBlockWith(Double.POSITIVE_INFINITY, pageSize); + } + + @Override + long bytesToLong(byte[] bytes) { + return NumericUtils.sortableBytesToLong(bytes, 0); + } + };
+ + public final NumericDocValues multiValueMode(SortedNumericDocValues sortedNumericDocValues) { + return MultiValueMode.MIN.select(sortedNumericDocValues); + } + + public final long fromPointValues(PointValues pointValues) throws IOException { + return bytesToLong(pointValues.getMinPackedValue()); + } + + public final long evaluate(long value1, long value2) { + return Math.min(value1, value2); + } + + abstract long bytesToLong(byte[] bytes); + } + + private final String fieldName; + private final NumberType numberType; + + public LuceneMinFactory( + List<? extends ShardContext> contexts, + Function<ShardContext, Query> queryFunction, + DataPartitioning dataPartitioning, + int taskConcurrency, + String fieldName, + NumberType numberType, + int limit + ) { + super(contexts, queryFunction, dataPartitioning, taskConcurrency, limit, ScoreMode.COMPLETE_NO_SCORES); + this.fieldName = fieldName; + this.numberType = numberType; + } + + @Override + public SourceOperator get(DriverContext driverContext) { + return new LuceneMinMaxOperator(driverContext.blockFactory(), sliceQueue, fieldName, numberType, limit, Long.MAX_VALUE); + } + + @Override + public String describe() { + return "LuceneMinOperator[type = " + + numberType.name() + + ", dataPartitioning = " + + dataPartitioning + + ", fieldName = " + + fieldName + + ", limit = " + + limit + + "]"; + } +}
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMinMaxOperator.java 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMinMaxOperator.java new file mode 100644 index 0000000000000..c41c31345df4e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneMinMaxOperator.java @@ -0,0 +1,179 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorable; +import org.apache.lucene.util.Bits; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.search.MultiValueMode; + +import java.io.IOException; +
+/** + * Operator that finds the min or max value of a field using Lucene searches + * and always returns one entry that mimics the min/max aggregation internal state: + * 1. the min/max with a type depending on the {@link NumberType} (The initial value if no doc is seen) + * 2. a bool flag (seen) that is true if at least one document has been matched, otherwise false + *
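+ * For example, with {@code NumberType.LONG} and no matching documents a max emits {@code Long.MIN_VALUE}
+ * with {@code seen = false}, and a min emits {@code Long.MAX_VALUE} with {@code seen = false}.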

+ * It works for fields that index data using lucene {@link PointValues} and/or {@link SortedNumericDocValues}. + * It assumes that {@link SortedNumericDocValues} are always present. + */ +final class LuceneMinMaxOperator extends LuceneOperator { + + sealed interface NumberType permits LuceneMinFactory.NumberType, LuceneMaxFactory.NumberType { + + /** Extract the competitive value from the {@link PointValues} */ + long fromPointValues(PointValues pointValues) throws IOException; + + /** Wraps the provided {@link SortedNumericDocValues} with a {@link MultiValueMode} */ + NumericDocValues multiValueMode(SortedNumericDocValues sortedNumericDocValues); + + /** Return the competitive value between {@code value1} and {@code value2} */ + long evaluate(long value1, long value2); + + /** Build the corresponding block */ + Block buildResult(BlockFactory blockFactory, long result, int pageSize); + + /** Build the corresponding block */ + Block buildEmptyResult(BlockFactory blockFactory, int pageSize); + } + + private static final int PAGE_SIZE = 1; + + private boolean seen = false; + private int remainingDocs; + private long result; + + private final NumberType numberType; + + private final String fieldName; + + LuceneMinMaxOperator( + BlockFactory blockFactory, + LuceneSliceQueue sliceQueue, + String fieldName, + NumberType numberType, + int limit, + long initialResult + ) { + super(blockFactory, PAGE_SIZE, sliceQueue); + this.remainingDocs = limit; + this.numberType = numberType; + this.fieldName = fieldName; + this.result = initialResult; + } + + @Override + public boolean isFinished() { + return doneCollecting || remainingDocs == 0; + } + + @Override + public void finish() { + doneCollecting = true; + } + + @Override + public Page getCheckedOutput() throws IOException { + if (isFinished()) { + assert remainingDocs <= 0 : remainingDocs; + return null; + } + final long start = System.nanoTime(); + try { + final LuceneScorer scorer = getCurrentOrLoadNextScorer(); + // no scorer means no more docs + if (scorer == null) { + remainingDocs = 0; + } else { + final LeafReader reader = scorer.leafReaderContext().reader(); + final Query query = scorer.weight().getQuery(); + if (query == null || query instanceof MatchAllDocsQuery) { + final PointValues pointValues = reader.getPointValues(fieldName); + // only apply shortcut if we are visiting all documents, otherwise we need to trigger the search + // on doc values as that's the order they are visited without push down. + if (pointValues != null && pointValues.getDocCount() >= remainingDocs) { + final Bits liveDocs = reader.getLiveDocs(); + if (liveDocs == null) { + // In data partitioning, we might have got the same segment previous + // to this but with a different document range. And we're totally ignoring that range. + // We're just reading the min/max from the segment. That's sneaky, but it makes sense. + // And if we get another slice in the same segment we may as well skip it - + // we've already looked. 
+ if (scorer.position() == 0) { + seen = true; + result = numberType.evaluate(result, numberType.fromPointValues(pointValues)); + if (remainingDocs != NO_LIMIT) { + remainingDocs -= pointValues.getDocCount(); + } + } + scorer.markAsDone(); + } + } + } + if (scorer.isDone() == false) { + // could not apply shortcut, trigger the search + final NumericDocValues values = numberType.multiValueMode(reader.getSortedNumericDocValues(fieldName)); + final LeafCollector leafCollector = new LeafCollector() { + @Override + public void setScorer(Scorable scorer) {} + + @Override + public void collect(int doc) throws IOException { + assert remainingDocs > 0; + remainingDocs--; + if (values.advanceExact(doc)) { + seen = true; + result = numberType.evaluate(result, values.longValue()); + } + } + }; + scorer.scoreNextRange(leafCollector, reader.getLiveDocs(), remainingDocs); + } + }
+ + Page page = null; + // emit only one page + if (remainingDocs <= 0 && pagesEmitted == 0) { + pagesEmitted++; + Block result = null; + BooleanBlock seen = null; + try { + result = this.seen + ? numberType.buildResult(blockFactory, this.result, PAGE_SIZE) + : numberType.buildEmptyResult(blockFactory, PAGE_SIZE); + seen = blockFactory.newConstantBooleanBlockWith(this.seen, PAGE_SIZE); + page = new Page(PAGE_SIZE, result, seen); + } finally { + if (page == null) { + Releasables.closeExpectNoException(result, seen); + } + } + } + return page; + } finally { + processingNanos += System.nanoTime() - start; + } + } + + @Override + protected void describe(StringBuilder sb) { + sb.append(", remainingDocs=").append(remainingDocs); + } +}
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java index 6f75298e95dd7..bbc3ace3716ba 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java @@ -79,6 +79,7 @@ public abstract static class Factory implements SourceOperator.SourceOperatorFac protected final DataPartitioning dataPartitioning; protected final int taskConcurrency; protected final int limit; + protected final ScoreMode scoreMode; protected final LuceneSliceQueue sliceQueue; /** @@ -95,6 +96,7 @@ protected Factory( ScoreMode scoreMode ) { this.limit = limit; + this.scoreMode = scoreMode; this.dataPartitioning = dataPartitioning; var weightFunction = weightFunction(queryFunction, scoreMode); this.sliceQueue = LuceneSliceQueue.create(contexts, weightFunction, dataPartitioning, taskConcurrency);
@@ -438,7 +440,8 @@ static Function<ShardContext, Weight> weightFunction(Function<ShardContext, Query> queryFunction, ScoreMode scoreMode) {
/** * Lazily initialized {@link Scorer} for this. {@code null} here means uninitialized * or that {@link #noMatch} is true. */ private Scorer scorer; + /** + * Thread that initialized the {@link #scorer}. + */ + private Thread scorerThread; + /** * Lazily initialized {@link BulkScorer} for this. {@code null} here means uninitialized * or that {@link #noMatch} is true. */ private BulkScorer bulkScorer; + /** + * Thread that initialized the {@link #bulkScorer}. + */ + private Thread bulkScorerThread; + /** * Set to {@code true} if, in the process of building a {@link Scorer} or {@link BulkScorer}, * the {@link Weight} tells us there aren't any matches. 
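The two hunks below make the cached Lucene scorers thread-affine: a Scorer or BulkScorer built on one driver thread must not be reused from another, so the evaluator records the thread that built it and rebuilds on migration. A minimal sketch of the same pattern, assuming illustrative names (ThreadAffineScorer and bulkScorerForCurrentThread are not part of the actual change):

    // Sketch only: rebuild a thread-affine Lucene resource when the executing thread changes.
    import java.io.IOException;
    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.search.BulkScorer;
    import org.apache.lucene.search.Weight;

    class ThreadAffineScorer {
        private BulkScorer bulkScorer;   // only valid on the thread that created it
        private Thread bulkScorerThread; // thread that created the cached scorer

        BulkScorer bulkScorerForCurrentThread(Weight weight, LeafReaderContext ctx) throws IOException {
            if (bulkScorer == null || bulkScorerThread != Thread.currentThread()) {
                bulkScorerThread = Thread.currentThread();
                bulkScorer = weight.bulkScorer(ctx); // may be null when the weight matches nothing
            }
            return bulkScorer;
        }
    }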
@@ -223,7 +234,10 @@ BooleanVector scoreDense(int min, int max) throws IOException { if (noMatch) { return blockFactory.newConstantBooleanVector(false, length); } - if (bulkScorer == null) { + if (bulkScorer == null || // The bulkScorer wasn't initialized + Thread.currentThread() != bulkScorerThread // The bulkScorer was initialized on a different thread + ) { + bulkScorerThread = Thread.currentThread(); bulkScorer = weight.bulkScorer(ctx); if (bulkScorer == null) { noMatch = true; @@ -257,8 +271,11 @@ private void initScorer(int minDocId) throws IOException { if (noMatch) { return; } - if (scorer == null || scorer.iterator().docID() > minDocId) { - // The previous block might have been beyond this one, reset the scorer and try again. + if (scorer == null || // Scorer not initialized + scorerThread != Thread.currentThread() || // Scorer initialized on a different thread + scorer.iterator().docID() > minDocId // The previous block came "after" this one + ) { + scorerThread = Thread.currentThread(); scorer = weight.scorer(ctx); if (scorer == null) { noMatch = true; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java index 3721fec3b2eb8..4afabcadf60cd 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java @@ -13,7 +13,9 @@ import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.DocBlock; import org.elasticsearch.compute.data.DocVector; +import org.elasticsearch.compute.data.DoubleVector; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.Page; @@ -25,6 +27,9 @@ import java.util.List; import java.util.function.Function; +import static org.apache.lucene.search.ScoreMode.COMPLETE; +import static org.apache.lucene.search.ScoreMode.COMPLETE_NO_SCORES; + /** * Source operator that incrementally runs Lucene searches */ @@ -34,6 +39,7 @@ public class LuceneSourceOperator extends LuceneOperator { private int remainingDocs; private IntVector.Builder docsBuilder; + private DoubleVector.Builder scoreBuilder; private final LeafCollector leafCollector; private final int minPageSize; @@ -47,15 +53,16 @@ public Factory( DataPartitioning dataPartitioning, int taskConcurrency, int maxPageSize, - int limit + int limit, + boolean scoring ) { - super(contexts, queryFunction, dataPartitioning, taskConcurrency, limit, ScoreMode.COMPLETE_NO_SCORES); + super(contexts, queryFunction, dataPartitioning, taskConcurrency, limit, scoring ? 
COMPLETE : COMPLETE_NO_SCORES); this.maxPageSize = maxPageSize; } @Override public SourceOperator get(DriverContext driverContext) { - return new LuceneSourceOperator(driverContext.blockFactory(), maxPageSize, sliceQueue, limit); + return new LuceneSourceOperator(driverContext.blockFactory(), maxPageSize, sliceQueue, limit, scoreMode); } public int maxPageSize() { @@ -70,32 +77,65 @@ public String describe() { + maxPageSize + ", limit = " + limit + + ", scoreMode = " + + scoreMode + "]"; } } - public LuceneSourceOperator(BlockFactory blockFactory, int maxPageSize, LuceneSliceQueue sliceQueue, int limit) { + @SuppressWarnings("this-escape") + public LuceneSourceOperator(BlockFactory blockFactory, int maxPageSize, LuceneSliceQueue sliceQueue, int limit, ScoreMode scoreMode) { super(blockFactory, maxPageSize, sliceQueue); this.minPageSize = Math.max(1, maxPageSize / 2); this.remainingDocs = limit; - this.docsBuilder = blockFactory.newIntVectorBuilder(Math.min(limit, maxPageSize)); - this.leafCollector = new LeafCollector() { - @Override - public void setScorer(Scorable scorer) { - + int estimatedSize = Math.min(limit, maxPageSize); + boolean success = false; + try { + this.docsBuilder = blockFactory.newIntVectorBuilder(estimatedSize); + if (scoreMode.needsScores()) { + scoreBuilder = blockFactory.newDoubleVectorBuilder(estimatedSize); + this.leafCollector = new ScoringCollector(); + } else { + scoreBuilder = null; + this.leafCollector = new LimitingCollector(); } + success = true; + } finally { + if (success == false) { + close(); + } + } + } - @Override - public void collect(int doc) { - if (remainingDocs > 0) { - --remainingDocs; - docsBuilder.appendInt(doc); - currentPagePos++; - } else { - throw new CollectionTerminatedException(); - } + class LimitingCollector implements LeafCollector { + @Override + public void setScorer(Scorable scorer) {} + + @Override + public void collect(int doc) throws IOException { + if (remainingDocs > 0) { + --remainingDocs; + docsBuilder.appendInt(doc); + currentPagePos++; + } else { + throw new CollectionTerminatedException(); } - }; + } + } + + final class ScoringCollector extends LuceneSourceOperator.LimitingCollector { + private Scorable scorable; + + @Override + public void setScorer(Scorable scorer) { + this.scorable = scorer; + } + + @Override + public void collect(int doc) throws IOException { + super.collect(doc); + scoreBuilder.appendDouble(scorable.score()); + } } @Override @@ -139,15 +179,27 @@ public Page getCheckedOutput() throws IOException { IntBlock shard = null; IntBlock leaf = null; IntVector docs = null; + DoubleVector scores = null; + DocBlock docBlock = null; try { shard = blockFactory.newConstantIntBlockWith(scorer.shardContext().index(), currentPagePos); leaf = blockFactory.newConstantIntBlockWith(scorer.leafReaderContext().ord, currentPagePos); docs = docsBuilder.build(); docsBuilder = blockFactory.newIntVectorBuilder(Math.min(remainingDocs, maxPageSize)); - page = new Page(currentPagePos, new DocVector(shard.asVector(), leaf.asVector(), docs, true).asBlock()); + docBlock = new DocVector(shard.asVector(), leaf.asVector(), docs, true).asBlock(); + shard = null; + leaf = null; + docs = null; + if (scoreBuilder == null) { + page = new Page(currentPagePos, docBlock); + } else { + scores = scoreBuilder.build(); + scoreBuilder = blockFactory.newDoubleVectorBuilder(Math.min(remainingDocs, maxPageSize)); + page = new Page(currentPagePos, docBlock, scores.asBlock()); + } } finally { if (page == null) { - 
Releasables.closeExpectNoException(shard, leaf, docs); + Releasables.closeExpectNoException(shard, leaf, docs, docBlock, scores); } } currentPagePos = 0; @@ -160,7 +212,7 @@ public Page getCheckedOutput() throws IOException { @Override public void close() { - docsBuilder.close(); + Releasables.close(docsBuilder, scoreBuilder); } @Override
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java index 0f600958b93b3..8da62963ffb64 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java @@ -10,15 +10,22 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.TopFieldCollector; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.search.TopFieldCollectorManager; +import org.apache.lucene.search.TopScoreDocCollectorManager; import org.elasticsearch.common.Strings; import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.DocBlock; import org.elasticsearch.compute.data.DocVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.Page; @@ -29,17 +36,21 @@ import org.elasticsearch.search.sort.SortBuilder; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Optional; import java.util.function.Function; import java.util.stream.Collectors; +import static org.apache.lucene.search.ScoreMode.COMPLETE; +import static org.apache.lucene.search.ScoreMode.TOP_DOCS; +
/** * Source operator that builds Pages out of the output of a TopFieldCollector (aka TopN) */ public final class LuceneTopNSourceOperator extends LuceneOperator { - public static final class Factory extends LuceneOperator.Factory { + public static class Factory extends LuceneOperator.Factory { private final int maxPageSize; private final List<SortBuilder<?>> sorts; @@ -50,16 +61,17 @@ public Factory( int taskConcurrency, int maxPageSize, int limit, - List<SortBuilder<?>> sorts + List<SortBuilder<?>> sorts, + boolean scoring ) { - super(contexts, queryFunction, dataPartitioning, taskConcurrency, limit, ScoreMode.TOP_DOCS); + super(contexts, queryFunction, dataPartitioning, taskConcurrency, limit, scoring ? 
COMPLETE : TOP_DOCS); this.maxPageSize = maxPageSize; this.sorts = sorts; } @Override public SourceOperator get(DriverContext driverContext) { - return new LuceneTopNSourceOperator(driverContext.blockFactory(), maxPageSize, sorts, limit, sliceQueue); + return new LuceneTopNSourceOperator(driverContext.blockFactory(), maxPageSize, sorts, limit, sliceQueue, scoreMode); } public int maxPageSize() { @@ -75,6 +87,8 @@ public String describe() { + maxPageSize + ", limit = " + limit + + ", scoreMode = " + + scoreMode + ", sorts = [" + notPrettySorts + "]]";
@@ -93,17 +107,20 @@ public String describe() { private PerShardCollector perShardCollector; private final List<SortBuilder<?>> sorts; private final int limit; + private final ScoreMode scoreMode; public LuceneTopNSourceOperator( BlockFactory blockFactory, int maxPageSize, List<SortBuilder<?>> sorts, int limit, - LuceneSliceQueue sliceQueue + LuceneSliceQueue sliceQueue, + ScoreMode scoreMode ) { super(blockFactory, maxPageSize, sliceQueue); this.sorts = sorts; this.limit = limit; + this.scoreMode = scoreMode; } @Override
@@ -145,7 +162,7 @@ private Page collect() throws IOException { try { if (perShardCollector == null || perShardCollector.shardContext.index() != scorer.shardContext().index()) { // TODO: share the bottom between shardCollectors - perShardCollector = new PerShardCollector(scorer.shardContext(), sorts, limit); + perShardCollector = newPerShardCollector(scorer.shardContext(), sorts, limit); } var leafCollector = perShardCollector.getLeafCollector(scorer.leafReaderContext()); scorer.scoreNextRange(leafCollector, scorer.leafReaderContext().reader().getLiveDocs(), maxPageSize); @@ -171,7 +188,7 @@ private Page emit(boolean startEmitting) { assert isEmitting() == false : "offset=" + offset + " score_docs=" + Arrays.toString(scoreDocs); offset = 0; if (perShardCollector != null) { - scoreDocs = perShardCollector.topFieldCollector.topDocs().scoreDocs; + scoreDocs = perShardCollector.collector.topDocs().scoreDocs; } else { scoreDocs = new ScoreDoc[0]; }
@@ -183,10 +200,13 @@ private Page emit(boolean startEmitting) { IntBlock shard = null; IntVector segments = null; IntVector docs = null; + DocBlock docBlock = null; + DoubleBlock scores = null; Page page = null; try ( IntVector.Builder currentSegmentBuilder = blockFactory.newIntVectorFixedBuilder(size); - IntVector.Builder currentDocsBuilder = blockFactory.newIntVectorFixedBuilder(size) + IntVector.Builder currentDocsBuilder = blockFactory.newIntVectorFixedBuilder(size); + DoubleVector.Builder currentScoresBuilder = scoreVectorOrNull(size); ) { int start = offset; offset += size; @@ -196,53 +216,130 @@ private Page emit(boolean startEmitting) { int segment = ReaderUtil.subIndex(doc, leafContexts); currentSegmentBuilder.appendInt(segment); currentDocsBuilder.appendInt(doc - leafContexts.get(segment).docBase); // the offset inside the segment + if (currentScoresBuilder != null) { + float score = getScore(scoreDocs[i]); + currentScoresBuilder.appendDouble(score); + } } shard = blockFactory.newConstantIntBlockWith(perShardCollector.shardContext.index(), size); segments = currentSegmentBuilder.build(); docs = currentDocsBuilder.build(); - page = new Page(size, new DocVector(shard.asVector(), segments, docs, null).asBlock()); + docBlock = new DocVector(shard.asVector(), segments, docs, null).asBlock(); + shard = null; + segments = null; + docs = null; + if (currentScoresBuilder == null) { + page = new Page(size, docBlock); + } else { + scores = currentScoresBuilder.build().asBlock(); + page = new Page(size, docBlock, 
scores); + } } finally { if (page == null) { - Releasables.closeExpectNoException(shard, segments, docs); + Releasables.closeExpectNoException(shard, segments, docs, docBlock, scores); } } pagesEmitted++; return page; }
+ private float getScore(ScoreDoc scoreDoc) { + if (scoreDoc instanceof FieldDoc fieldDoc) { + if (Float.isNaN(fieldDoc.score)) { + if (sorts != null) { + return (Float) fieldDoc.fields[sorts.size() + 1]; + } else { + return (Float) fieldDoc.fields[0]; + } + } else { + return fieldDoc.score; + } + } else { + return scoreDoc.score; + } + } + + private DoubleVector.Builder scoreVectorOrNull(int size) { + if (scoreMode.needsScores()) { + return blockFactory.newDoubleVectorFixedBuilder(size); + } else { + return null; + } + } +
@Override protected void describe(StringBuilder sb) { sb.append(", limit = ").append(limit); + sb.append(", scoreMode = ").append(scoreMode); String notPrettySorts = sorts.stream().map(Strings::toString).collect(Collectors.joining(",")); sb.append(", sorts = [").append(notPrettySorts).append("]"); }
- static final class PerShardCollector { + PerShardCollector newPerShardCollector(ShardContext shardContext, List<SortBuilder<?>> sorts, int limit) throws IOException { + Optional<SortAndFormats> sortAndFormats = shardContext.buildSort(sorts); + if (sortAndFormats.isEmpty()) { + throw new IllegalStateException("sorts must not be disabled in TopN"); + } + if (scoreMode.needsScores() == false) { + return new NonScoringPerShardCollector(shardContext, sortAndFormats.get().sort, limit); + } else { + SortField[] sortFields = sortAndFormats.get().sort.getSort(); + if (sortFields != null && sortFields.length == 1 && sortFields[0].needsScores() && sortFields[0].getReverse() == false) { + // SORT _score DESC + return new ScoringPerShardCollector( + shardContext, + new TopScoreDocCollectorManager(limit, null, limit, false).newCollector() + ); + } else { + // SORT ..., _score, ... + var sort = new Sort(); + if (sortFields != null) { + var l = new ArrayList<>(Arrays.asList(sortFields)); + l.add(SortField.FIELD_DOC); + l.add(SortField.FIELD_SCORE); + sort = new Sort(l.toArray(SortField[]::new)); + } + return new ScoringPerShardCollector( + shardContext, + new TopFieldCollectorManager(sort, limit, null, limit, false).newCollector() + ); + } + } + } +
+ abstract static class PerShardCollector { private final ShardContext shardContext; - private final TopFieldCollector topFieldCollector; + private final TopDocsCollector<?> collector; private int leafIndex; private LeafCollector leafCollector; private Thread currentThread; - PerShardCollector(ShardContext shardContext, List<SortBuilder<?>> sorts, int limit) throws IOException { + PerShardCollector(ShardContext shardContext, TopDocsCollector<?> collector) { this.shardContext = shardContext; - Optional<SortAndFormats> sortAndFormats = shardContext.buildSort(sorts); - if (sortAndFormats.isEmpty()) { - throw new IllegalStateException("sorts must not be disabled in TopN"); - } - - // We don't use CollectorManager here as we don't retrieve the total hits and sort by score. 
- this.topFieldCollector = new TopFieldCollectorManager(sortAndFormats.get().sort, limit, null, 0, false).newCollector(); + this.collector = collector; } LeafCollector getLeafCollector(LeafReaderContext leafReaderContext) throws IOException { if (currentThread != Thread.currentThread() || leafIndex != leafReaderContext.ord) { - leafCollector = topFieldCollector.getLeafCollector(leafReaderContext); + leafCollector = collector.getLeafCollector(leafReaderContext); leafIndex = leafReaderContext.ord; currentThread = Thread.currentThread(); } return leafCollector; } } + + static final class NonScoringPerShardCollector extends PerShardCollector { + NonScoringPerShardCollector(ShardContext shardContext, Sort sort, int limit) { + // We don't use CollectorManager here as we don't retrieve the total hits and sort by score. + super(shardContext, new TopFieldCollectorManager(sort, limit, null, 0, false).newCollector()); + } + } + + static final class ScoringPerShardCollector extends PerShardCollector { + ScoringPerShardCollector(ShardContext shardContext, TopDocsCollector topDocsCollector) { + super(shardContext, topDocsCollector); + } + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java index 2c36b42dee277..06b890603e489 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java @@ -43,6 +43,7 @@ public abstract class AsyncOperator implements Operator { private final int maxOutstandingRequests; private final LongAdder totalTimeInNanos = new LongAdder(); + private boolean finished = false; private volatile boolean closed = false; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java index 03a4ca2b0ad5e..6f8386ec08de1 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashAggregationOperator.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.Describable; +import org.elasticsearch.compute.aggregation.AggregatorMode; import org.elasticsearch.compute.aggregation.GroupingAggregator; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; import org.elasticsearch.compute.aggregation.blockhash.BlockHash; @@ -23,6 +24,7 @@ import org.elasticsearch.compute.data.Page; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -39,11 +41,20 @@ public class HashAggregationOperator implements Operator { public record HashAggregationOperatorFactory( List groups, + AggregatorMode aggregatorMode, List aggregators, - int maxPageSize + int maxPageSize, + AnalysisRegistry analysisRegistry ) implements OperatorFactory { @Override public Operator get(DriverContext driverContext) { + if (groups.stream().anyMatch(BlockHash.GroupSpec::isCategorize)) { + return new HashAggregationOperator( + aggregators, + () -> 
BlockHash.buildCategorizeBlockHash(groups, aggregatorMode, driverContext.blockFactory(), analysisRegistry), + driverContext + ); + } return new HashAggregationOperator( aggregators, () -> BlockHash.build(groups, driverContext.blockFactory(), maxPageSize, false), diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/RowOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/RowOperator.java deleted file mode 100644 index 4b4379eb6a4d8..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/RowOperator.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.operator; - -import org.elasticsearch.compute.data.BlockFactory; - -import java.util.List; -import java.util.Objects; - -import static java.util.stream.Collectors.joining; - -public class RowOperator extends LocalSourceOperator { - - private final List objects; - - public record RowOperatorFactory(List objects) implements SourceOperatorFactory { - - @Override - public SourceOperator get(DriverContext driverContext) { - return new RowOperator(driverContext.blockFactory(), objects); - } - - @Override - public String describe() { - return "RowOperator[objects = " + objects.stream().map(Objects::toString).collect(joining(",")) + "]"; - } - } - - public RowOperator(BlockFactory blockFactory, List objects) { - super(blockFactory, () -> objects); - this.objects = objects; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(this.getClass().getSimpleName()).append("["); - sb.append("objects=").append(objects); - sb.append("]"); - return sb.toString(); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java index 06059944f1310..a943a90d02e87 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java @@ -40,13 +40,15 @@ import java.io.IOException; import java.util.Map; +import java.util.Set; import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; /** * {@link ExchangeService} is responsible for exchanging pages between exchange sinks and sources on the same or different nodes. * It holds a map of {@link ExchangeSinkHandler} instances for each node in the cluster to serve {@link ExchangeRequest}s - * To connect exchange sources to exchange sinks, use the {@link ExchangeSourceHandler#addRemoteSink(RemoteSink, int)} method. + * To connect exchange sources to exchange sinks, use {@link ExchangeSourceHandler#addRemoteSink(RemoteSink, boolean, int, ActionListener)}. 
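Stepping back to the HashAggregationOperator.HashAggregationOperatorFactory change earlier in this hunk: the new AggregatorMode and AnalysisRegistry arguments only matter when a group is flagged as CATEGORIZE, in which case the factory builds a CategorizeBlockHash instead of the generic BlockHash. A hedged wiring sketch, borrowing the three-argument GroupSpec used by the tests later in this diff (analysisRegistry and driverContext are assumed to be in scope):

// CATEGORIZE grouping on channel 0: routes to BlockHash.buildCategorizeBlockHash.
var categorize = new BlockHash.GroupSpec(0, ElementType.BYTES_REF, true);
var factory = new HashAggregationOperator.HashAggregationOperatorFactory(
    List.of(categorize),
    AggregatorMode.INITIAL,      // raw input -> intermediate categorizer state
    List.of(),                   // grouping aggregator factories would go here
    16 * 1024,                   // maxPageSize; unused on the categorize branch
    analysisRegistry             // required to build the categorization analyzer
);
Operator op = factory.get(driverContext); // picks the categorize path via GroupSpec.isCategorize()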
*/ public final class ExchangeService extends AbstractLifecycleComponent { // TODO: Make this a child action of the data node transport to ensure that exchanges @@ -291,6 +293,7 @@ static final class TransportRemoteSink implements RemoteSink { final Executor responseExecutor; final AtomicLong estimatedPageSizeInBytes = new AtomicLong(0L); + final AtomicBoolean finished = new AtomicBoolean(false); TransportRemoteSink( TransportService transportService, @@ -310,7 +313,33 @@ static final class TransportRemoteSink implements RemoteSink { @Override public void fetchPageAsync(boolean allSourcesFinished, ActionListener listener) { - final long reservedBytes = estimatedPageSizeInBytes.get(); + if (allSourcesFinished) { + if (finished.compareAndSet(false, true)) { + doFetchPageAsync(true, listener); + } else { + // already finished or promised + listener.onResponse(new ExchangeResponse(blockFactory, null, true)); + } + } else { + // already finished + if (finished.get()) { + listener.onResponse(new ExchangeResponse(blockFactory, null, true)); + return; + } + doFetchPageAsync(false, ActionListener.wrap(r -> { + if (r.finished()) { + finished.set(true); + } + listener.onResponse(r); + }, e -> { + finished.set(true); + listener.onFailure(e); + })); + } + } + + private void doFetchPageAsync(boolean allSourcesFinished, ActionListener listener) { + final long reservedBytes = allSourcesFinished ? 0 : estimatedPageSizeInBytes.get(); if (reservedBytes > 0) { // This doesn't fully protect ESQL from OOM, but reduces the likelihood. blockFactory.breaker().addEstimateBytesAndMaybeBreak(reservedBytes, "fetch page"); @@ -339,6 +368,10 @@ public boolean isEmpty() { return sinks.isEmpty(); } + public Set sinkKeys() { + return sinks.keySet(); + } + @Override protected void doStart() { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java index 757a3262433c8..614c3fe0ecc5c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java @@ -93,7 +93,7 @@ public IsBlockedResult waitForWriting() { * @param sourceFinished if true, then this handler can finish as sources have enough pages. 
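The TransportRemoteSink change above is easy to misread, so here is its finished-flag protocol restated as a commented sketch (the names mirror the diff; imports and the surrounding class scaffolding are omitted): only the first allSourcesFinished call issues a real close request, and once the sink is known to be finished, later fetches are answered locally with an empty "finished" response and reserve no circuit-breaker bytes.

final AtomicBoolean finished = new AtomicBoolean(false);

void fetchPageAsync(boolean allSourcesFinished, ActionListener<ExchangeResponse> listener) {
    if (allSourcesFinished) {
        if (finished.compareAndSet(false, true)) {
            doFetchPageAsync(true, listener); // exactly one close request goes over the wire
        } else {
            // someone already closed (or promised to close): answer locally
            listener.onResponse(new ExchangeResponse(blockFactory, null, true));
        }
    } else if (finished.get()) {
        listener.onResponse(new ExchangeResponse(blockFactory, null, true)); // late regular fetch
    } else {
        doFetchPageAsync(false, ActionListener.wrap(r -> {
            if (r.finished()) {
                finished.set(true); // remember a natural end of stream
            }
            listener.onResponse(r);
        }, e -> {
            finished.set(true);     // failures are terminal as well
            listener.onFailure(e);
        }));
    }
}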
* @param listener the listener that will be notified when pages are ready or this handler is finished * @see RemoteSink - * @see ExchangeSourceHandler#addRemoteSink(RemoteSink, int) + * @see ExchangeSourceHandler#addRemoteSink(RemoteSink, boolean, int, ActionListener) */ public void fetchPageAsync(boolean sourceFinished, ActionListener listener) { if (sourceFinished) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java index e3fc0e26e34e0..61b3386ce0274 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java @@ -24,10 +24,10 @@ /** * An {@link ExchangeSourceHandler} asynchronously fetches pages and status from multiple {@link RemoteSink}s * and feeds them to its {@link ExchangeSource}, which are created using the {@link #createExchangeSource()}) method. - * {@link RemoteSink}s are added using the {@link #addRemoteSink(RemoteSink, int)}) method. + * {@link RemoteSink}s are added using the {@link #addRemoteSink(RemoteSink, boolean, int, ActionListener)}) method. * * @see #createExchangeSource() - * @see #addRemoteSink(RemoteSink, int) + * @see #addRemoteSink(RemoteSink, boolean, int, ActionListener) */ public final class ExchangeSourceHandler { private final ExchangeBuffer buffer; @@ -35,13 +35,43 @@ public final class ExchangeSourceHandler { private final PendingInstances outstandingSinks; private final PendingInstances outstandingSources; + // Collect failures that occur while fetching pages from the remote sink with `failFast=true`. + // The exchange source will stop fetching and abort as soon as any failure is added to this failure collector. + // The final failure collected will be notified to callers via the {@code completionListener}. private final FailureCollector failure = new FailureCollector(); - public ExchangeSourceHandler(int maxBufferSize, Executor fetchExecutor) { + /** + * Creates a new ExchangeSourceHandler. + * + * @param maxBufferSize the maximum size of the exchange buffer. A larger buffer reduces ``pauses`` but uses more memory, + * which could otherwise be allocated for other purposes. + * @param fetchExecutor the executor used to fetch pages. 
+ * @param completionListener a listener that will be notified when the exchange source handler fails or completes + */ + public ExchangeSourceHandler(int maxBufferSize, Executor fetchExecutor, ActionListener<Void> completionListener) { this.buffer = new ExchangeBuffer(maxBufferSize); this.fetchExecutor = fetchExecutor; this.outstandingSinks = new PendingInstances(() -> buffer.finish(false)); this.outstandingSources = new PendingInstances(() -> buffer.finish(true)); + buffer.addCompletionListener(ActionListener.running(() -> { + final ActionListener<Void> listener = ActionListener.assertAtLeastOnce(completionListener).delegateFailure((l, unused) -> { + final Exception e = failure.getFailure(); + if (e != null) { + l.onFailure(e); + } else { + l.onResponse(null); + } + }); + try (RefCountingListener refs = new RefCountingListener(listener)) { + for (PendingInstances pending : List.of(outstandingSinks, outstandingSources)) { + // Create an outstanding instance and then finish to complete the completionListener + // if we haven't registered any instances of exchange sinks or exchange sources before. + pending.trackNewInstance(); + pending.completion.addListener(refs.acquire()); + pending.finishInstance(); + } + } + })); } private class ExchangeSourceImpl implements ExchangeSource { @@ -89,20 +119,6 @@ public int bufferSize() { } } - public void addCompletionListener(ActionListener<Void> listener) { - buffer.addCompletionListener(ActionListener.running(() -> { - try (RefCountingListener refs = new RefCountingListener(listener)) { - for (PendingInstances pending : List.of(outstandingSinks, outstandingSources)) { - // Create an outstanding instance and then finish to complete the completionListener - // if we haven't registered any instances of exchange sinks or exchange sources before. - pending.trackNewInstance(); - pending.completion.addListener(refs.acquire()); - pending.finishInstance(); - } - } - })); - } - /** * Create a new {@link ExchangeSource} for exchanging data * @@ -159,10 +175,14 @@ void exited() { private final class RemoteSinkFetcher { private volatile boolean finished = false; private final RemoteSink remoteSink; + private final boolean failFast; + private final ActionListener<Void> completionListener; - RemoteSinkFetcher(RemoteSink remoteSink) { + RemoteSinkFetcher(RemoteSink remoteSink, boolean failFast, ActionListener<Void> completionListener) { outstandingSinks.trackNewInstance(); this.remoteSink = remoteSink; + this.failFast = failFast; + this.completionListener = completionListener; } void fetchPage() { @@ -198,15 +218,22 @@ void fetchPage() { } void onSinkFailed(Exception e) { - failure.unwrapAndCollect(e); + if (failFast) { + failure.unwrapAndCollect(e); + } buffer.waitForReading().listener().onResponse(null); // resume the Driver if it is being blocked on reading - onSinkComplete(); + if (finished == false) { + finished = true; + outstandingSinks.finishInstance(); + completionListener.onFailure(e); + } } void onSinkComplete() { if (finished == false) { finished = true; outstandingSinks.finishInstance(); + completionListener.onResponse(null); } } } @@ -215,25 +242,39 @@ void onSinkComplete() { * Add a remote sink as a new data source of this handler. The handler will start fetching data from this remote sink immediately. * * @param remoteSink the remote sink - * @param instances the number of concurrent ``clients`` that this handler should use to fetch pages. More clients reduce latency, - * but add overhead.
+ * @param failFast determines how failures in this remote sink are handled: + * - If {@code false}, failures from this remote sink will not cause the exchange source to abort. + * Callers must handle these failures notified via {@code listener}. + * - If {@code true}, failures from this remote sink will cause the exchange source to abort. + * Callers can safely ignore failures notified via this listener, as they are collected and + * reported by the exchange source. + * @param instances the number of concurrent ``clients`` that this handler should use to fetch pages. + * More clients reduce latency, but add overhead. + * @param listener a listener that will be notified when the sink fails or completes * @see ExchangeSinkHandler#fetchPageAsync(boolean, ActionListener) */ - public void addRemoteSink(RemoteSink remoteSink, int instances) { - for (int i = 0; i < instances; i++) { - var fetcher = new RemoteSinkFetcher(remoteSink); - fetchExecutor.execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - fetcher.onSinkFailed(e); + public void addRemoteSink(RemoteSink remoteSink, boolean failFast, int instances, ActionListener listener) { + final ActionListener sinkListener = ActionListener.assertAtLeastOnce(ActionListener.notifyOnce(listener)); + fetchExecutor.execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + if (failFast) { + failure.unwrapAndCollect(e); } + buffer.waitForReading().listener().onResponse(null); // resume the Driver if it is being blocked on reading + sinkListener.onFailure(e); + } - @Override - protected void doRun() { - fetcher.fetchPage(); + @Override + protected void doRun() { + try (RefCountingListener refs = new RefCountingListener(sinkListener)) { + for (int i = 0; i < instances; i++) { + var fetcher = new RemoteSinkFetcher(remoteSink, failFast, refs.acquire()); + fetcher.fetchPage(); + } } - }); - } + } + }); } /** diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/lookup/EnrichQuerySourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/lookup/EnrichQuerySourceOperator.java index 2093094fb8af5..0cd34d2ad4066 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/lookup/EnrichQuerySourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/lookup/EnrichQuerySourceOperator.java @@ -22,6 +22,7 @@ import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.compute.operator.Warnings; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -38,17 +39,25 @@ public final class EnrichQuerySourceOperator extends SourceOperator { private int queryPosition = -1; private final IndexReader indexReader; private final IndexSearcher searcher; + private final Warnings warnings; private final int maxPageSize; // using smaller pages enables quick cancellation and reduces sorting costs public static final int DEFAULT_MAX_PAGE_SIZE = 256; - public EnrichQuerySourceOperator(BlockFactory blockFactory, int maxPageSize, QueryList queryList, IndexReader indexReader) { + public EnrichQuerySourceOperator( + BlockFactory blockFactory, + int maxPageSize, + QueryList queryList, + IndexReader indexReader, + Warnings warnings + ) { this.blockFactory = blockFactory; this.maxPageSize = maxPageSize; this.queryList = queryList; this.indexReader = indexReader; 
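// Editor's note (illustrative, not part of the change): this Warnings sink is what
// getOutput() below uses when nextQuery() or the query rewrite throws. The exception is
// recorded via warnings.registerException(e) and the loop continues with the next
// position, so one malformed enrich/lookup key degrades to an ESQL warning instead of
// failing the whole driver.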
this.searcher = new IndexSearcher(indexReader); + this.warnings = warnings; } @Override @@ -73,12 +82,18 @@ public Page getOutput() { } int totalMatches = 0; do { - Query query = nextQuery(); - if (query == null) { - assert isFinished(); - break; + Query query; + try { + query = nextQuery(); + if (query == null) { + assert isFinished(); + break; + } + query = searcher.rewrite(new ConstantScoreQuery(query)); + } catch (Exception e) { + warnings.registerException(e); + continue; } - query = searcher.rewrite(new ConstantScoreQuery(query)); final var weight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1.0f); if (weight == null) { continue; diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java index 0d39a5bf8227e..e6ef10e53ec7c 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java @@ -394,7 +394,8 @@ static LuceneOperator.Factory luceneOperatorFactory(IndexReader reader, Query qu randomFrom(DataPartitioning.values()), randomIntBetween(1, 10), randomPageSize(), - limit + limit, + false // no scoring ); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java index cb190dfffafb9..58925a5ca36fc 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java @@ -54,7 +54,6 @@ import static org.elasticsearch.compute.data.BlockTestUtils.append; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.in; /** * Shared tests for testing grouped aggregations. @@ -105,8 +104,10 @@ private Operator.OperatorFactory simpleWithMode( } return new HashAggregationOperator.HashAggregationOperatorFactory( List.of(new BlockHash.GroupSpec(0, ElementType.LONG)), + mode, List.of(supplier.groupingAggregatorFactory(mode)), - randomPageSize() + randomPageSize(), + null ); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTestCase.java new file mode 100644 index 0000000000000..fa93c0aa1c375 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTestCase.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation.blockhash; + +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.compute.data.MockBlockFactory; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.test.ESTestCase; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public abstract class BlockHashTestCase extends ESTestCase { + + final CircuitBreaker breaker = newLimitedBreaker(ByteSizeValue.ofGb(1)); + final BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, mockBreakerService(breaker)); + final MockBlockFactory blockFactory = new MockBlockFactory(breaker, bigArrays); + + // A breaker service that always returns the given breaker for getBreaker(CircuitBreaker.REQUEST) + private static CircuitBreakerService mockBreakerService(CircuitBreaker breaker) { + CircuitBreakerService breakerService = mock(CircuitBreakerService.class); + when(breakerService.getBreaker(CircuitBreaker.REQUEST)).thenReturn(breaker); + return breakerService; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java index 088e791348840..ede2d68ca2367 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java @@ -11,11 +11,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.MockBigArrays; -import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BooleanBlock; @@ -26,7 +22,6 @@ import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; -import org.elasticsearch.compute.data.MockBlockFactory; import org.elasticsearch.compute.data.OrdinalBytesRefBlock; import org.elasticsearch.compute.data.OrdinalBytesRefVector; import org.elasticsearch.compute.data.Page; @@ -34,8 +29,6 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; -import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.test.ESTestCase; import org.junit.After; import java.util.ArrayList; @@ -54,14 +47,8 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.startsWith; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -public class BlockHashTests extends ESTestCase { - - final CircuitBreaker breaker = new MockBigArrays.LimitedBreaker("esql-test-breaker", ByteSizeValue.ofGb(1)); - final BigArrays bigArrays = new 
MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, mockBreakerService(breaker)); - final MockBlockFactory blockFactory = new MockBlockFactory(breaker, bigArrays); +public class BlockHashTests extends BlockHashTestCase { @ParametersFactory public static List params() { @@ -1534,13 +1521,6 @@ private void assertKeys(Block[] actualKeys, Object[][] expectedKeys) { } } - // A breaker service that always returns the given breaker for getBreaker(CircuitBreaker.REQUEST) - static CircuitBreakerService mockBreakerService(CircuitBreaker breaker) { - CircuitBreakerService breakerService = mock(CircuitBreakerService.class); - when(breakerService.getBreaker(CircuitBreaker.REQUEST)).thenReturn(breaker); - return breakerService; - } - IntVector intRange(int startInclusive, int endExclusive) { return IntVector.range(startInclusive, endExclusive, TestBlockFactory.getNonBreakingInstance()); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHashTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHashTests.java new file mode 100644 index 0000000000000..3c47e85a4a9c8 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/CategorizeBlockHashTests.java @@ -0,0 +1,460 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation.blockhash; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.compute.aggregation.AggregatorMode; +import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; +import org.elasticsearch.compute.aggregation.MaxLongAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.SumLongAggregatorFunctionSupplier; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.CannedSourceOperator; +import org.elasticsearch.compute.operator.Driver; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.HashAggregationOperator; +import org.elasticsearch.compute.operator.LocalSourceOperator; +import org.elasticsearch.compute.operator.PageConsumerOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.analysis.AnalysisRegistry; +import 
org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.plugins.scanners.StablePluginsRegistry; +import org.elasticsearch.xpack.ml.MachineLearning; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.elasticsearch.compute.operator.OperatorTestCase.runDriver; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class CategorizeBlockHashTests extends BlockHashTestCase { + + private AnalysisRegistry analysisRegistry; + + @Before + private void initAnalysisRegistry() throws IOException { + analysisRegistry = new AnalysisModule( + TestEnvironment.newEnvironment( + Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build() + ), + List.of(new MachineLearning(Settings.EMPTY), new CommonAnalysisPlugin()), + new StablePluginsRegistry() + ).getAnalysisRegistry(); + } + + public void testCategorizeRaw() { + final Page page; + boolean withNull = randomBoolean(); + final int positions = 7 + (withNull ? 1 : 0); + try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(positions)) { + builder.appendBytesRef(new BytesRef("Connected to 10.1.0.1")); + builder.appendBytesRef(new BytesRef("Connection error")); + builder.appendBytesRef(new BytesRef("Connection error")); + builder.appendBytesRef(new BytesRef("Connection error")); + builder.appendBytesRef(new BytesRef("Disconnected")); + builder.appendBytesRef(new BytesRef("Connected to 10.1.0.2")); + builder.appendBytesRef(new BytesRef("Connected to 10.1.0.3")); + if (withNull) { + if (randomBoolean()) { + builder.appendNull(); + } else { + builder.appendBytesRef(new BytesRef("")); + } + } + page = new Page(builder.build()); + } + + try (BlockHash hash = new CategorizeBlockHash(blockFactory, 0, AggregatorMode.INITIAL, analysisRegistry)) { + hash.add(page, new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + assertEquals(groupIds.getPositionCount(), positions); + + assertEquals(1, groupIds.getInt(0)); + assertEquals(2, groupIds.getInt(1)); + assertEquals(2, groupIds.getInt(2)); + assertEquals(2, groupIds.getInt(3)); + assertEquals(3, groupIds.getInt(4)); + assertEquals(1, groupIds.getInt(5)); + assertEquals(1, groupIds.getInt(6)); + if (withNull) { + assertEquals(0, groupIds.getInt(7)); + } + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + add(positionOffset, groupIds.asBlock()); + } + + @Override + public void close() { + fail("hashes should not close AddInput"); + } + }); + } finally { + page.releaseBlocks(); + } + + // TODO: randomize and try multiple pages. + // TODO: assert the state of the BlockHash after adding pages. Including the categorizer state. + // TODO: also test the lookup method and other stuff. + } + + public void testCategorizeIntermediate() { + Page page1; + boolean withNull = randomBoolean(); + int positions1 = 7 + (withNull ? 
1 : 0); + try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(positions1)) { + builder.appendBytesRef(new BytesRef("Connected to 10.1.0.1")); + builder.appendBytesRef(new BytesRef("Connection error")); + builder.appendBytesRef(new BytesRef("Connection error")); + builder.appendBytesRef(new BytesRef("Connected to 10.1.0.2")); + builder.appendBytesRef(new BytesRef("Connection error")); + builder.appendBytesRef(new BytesRef("Connected to 10.1.0.3")); + builder.appendBytesRef(new BytesRef("Connected to 10.1.0.4")); + if (withNull) { + if (randomBoolean()) { + builder.appendNull(); + } else { + builder.appendBytesRef(new BytesRef("")); + } + } + page1 = new Page(builder.build()); + } + Page page2; + int positions2 = 5; + try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(positions2)) { + builder.appendBytesRef(new BytesRef("Disconnected")); + builder.appendBytesRef(new BytesRef("Connected to 10.2.0.1")); + builder.appendBytesRef(new BytesRef("Disconnected")); + builder.appendBytesRef(new BytesRef("Connected to 10.3.0.2")); + builder.appendBytesRef(new BytesRef("System shutdown")); + page2 = new Page(builder.build()); + } + + Page intermediatePage1, intermediatePage2; + + // Fill intermediatePages with the intermediate state from the raw hashes + try ( + BlockHash rawHash1 = new CategorizeBlockHash(blockFactory, 0, AggregatorMode.INITIAL, analysisRegistry); + BlockHash rawHash2 = new CategorizeBlockHash(blockFactory, 0, AggregatorMode.INITIAL, analysisRegistry); + ) { + rawHash1.add(page1, new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + assertEquals(groupIds.getPositionCount(), positions1); + assertEquals(1, groupIds.getInt(0)); + assertEquals(2, groupIds.getInt(1)); + assertEquals(2, groupIds.getInt(2)); + assertEquals(1, groupIds.getInt(3)); + assertEquals(2, groupIds.getInt(4)); + assertEquals(1, groupIds.getInt(5)); + assertEquals(1, groupIds.getInt(6)); + if (withNull) { + assertEquals(0, groupIds.getInt(7)); + } + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + add(positionOffset, groupIds.asBlock()); + } + + @Override + public void close() { + fail("hashes should not close AddInput"); + } + }); + intermediatePage1 = new Page(rawHash1.getKeys()[0]); + + rawHash2.add(page2, new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + assertEquals(groupIds.getPositionCount(), positions2); + assertEquals(1, groupIds.getInt(0)); + assertEquals(2, groupIds.getInt(1)); + assertEquals(1, groupIds.getInt(2)); + assertEquals(2, groupIds.getInt(3)); + assertEquals(3, groupIds.getInt(4)); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + add(positionOffset, groupIds.asBlock()); + } + + @Override + public void close() { + fail("hashes should not close AddInput"); + } + }); + intermediatePage2 = new Page(rawHash2.getKeys()[0]); + } finally { + page1.releaseBlocks(); + page2.releaseBlocks(); + } + + try (BlockHash intermediateHash = new CategorizeBlockHash(blockFactory, 0, AggregatorMode.INTERMEDIATE, null)) { + intermediateHash.add(intermediatePage1, new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + Set values = IntStream.range(0, groupIds.getPositionCount()) + .map(groupIds::getInt) + .boxed() + .collect(Collectors.toSet()); + if (withNull) { + assertEquals(Set.of(0, 1, 2), values); + } else { + 
assertEquals(Set.of(1, 2), values); + } + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + add(positionOffset, groupIds.asBlock()); + } + + @Override + public void close() { + fail("hashes should not close AddInput"); + } + }); + + intermediateHash.add(intermediatePage2, new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + Set<Integer> values = IntStream.range(0, groupIds.getPositionCount()) + .map(groupIds::getInt) + .boxed() + .collect(Collectors.toSet()); + // The category IDs {0, 1, 2} should map to groups {1, 3, 4}: one of them matches the + // "Connected to ..." category that already exists from the first page, and the other two are new. + assertEquals(Set.of(1, 3, 4), values); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + add(positionOffset, groupIds.asBlock()); + } + + @Override + public void close() { + fail("hashes should not close AddInput"); + } + }); + } finally { + intermediatePage1.releaseBlocks(); + intermediatePage2.releaseBlocks(); + } + } + + public void testCategorize_withDriver() { + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofMb(256)).withCircuitBreaking(); + CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); + DriverContext driverContext = new DriverContext(bigArrays, new BlockFactory(breaker, bigArrays)); + + LocalSourceOperator.BlockSupplier input1 = () -> { + try ( + BytesRefVector.Builder textsBuilder = driverContext.blockFactory().newBytesRefVectorBuilder(10); + LongVector.Builder countsBuilder = driverContext.blockFactory().newLongVectorBuilder(10) + ) { + // Note that just using "a" or "aaa" doesn't work, because the ml_standard + // tokenizer drops numbers, including hexadecimal ones.
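// (Editor's addition.) "aaa" parses as a hexadecimal number and would be tokenized away,
// leaving nothing to categorize; the "zz" suffix keeps each marker token purely
// alphabetic. For the assertions at the end of this test, the per-category sums combine
// both drivers' counts, e.g. ccczz: 30 + 3 = 33, dddzz: 4 + 40 = 44,
// "goodbye": 800 + 80 + 8000 + 8 = 8888, and "hello": 900 + 9 + 90 = 999.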
+ textsBuilder.appendBytesRef(new BytesRef("aaazz")); + textsBuilder.appendBytesRef(new BytesRef("bbbzz")); + textsBuilder.appendBytesRef(new BytesRef("words words words goodbye jan")); + textsBuilder.appendBytesRef(new BytesRef("words words words goodbye nik")); + textsBuilder.appendBytesRef(new BytesRef("words words words goodbye tom")); + textsBuilder.appendBytesRef(new BytesRef("words words words hello jan")); + textsBuilder.appendBytesRef(new BytesRef("ccczz")); + textsBuilder.appendBytesRef(new BytesRef("dddzz")); + countsBuilder.appendLong(1); + countsBuilder.appendLong(2); + countsBuilder.appendLong(800); + countsBuilder.appendLong(80); + countsBuilder.appendLong(8000); + countsBuilder.appendLong(900); + countsBuilder.appendLong(30); + countsBuilder.appendLong(4); + return new Block[] { textsBuilder.build().asBlock(), countsBuilder.build().asBlock() }; + } + }; + LocalSourceOperator.BlockSupplier input2 = () -> { + try ( + BytesRefVector.Builder textsBuilder = driverContext.blockFactory().newBytesRefVectorBuilder(10); + LongVector.Builder countsBuilder = driverContext.blockFactory().newLongVectorBuilder(10) + ) { + textsBuilder.appendBytesRef(new BytesRef("words words words hello nik")); + textsBuilder.appendBytesRef(new BytesRef("words words words hello nik")); + textsBuilder.appendBytesRef(new BytesRef("ccczz")); + textsBuilder.appendBytesRef(new BytesRef("words words words goodbye chris")); + textsBuilder.appendBytesRef(new BytesRef("dddzz")); + textsBuilder.appendBytesRef(new BytesRef("eeezz")); + countsBuilder.appendLong(9); + countsBuilder.appendLong(90); + countsBuilder.appendLong(3); + countsBuilder.appendLong(8); + countsBuilder.appendLong(40); + countsBuilder.appendLong(5); + return new Block[] { textsBuilder.build().asBlock(), countsBuilder.build().asBlock() }; + } + }; + + List intermediateOutput = new ArrayList<>(); + + Driver driver = new Driver( + driverContext, + new LocalSourceOperator(input1), + List.of( + new HashAggregationOperator.HashAggregationOperatorFactory( + List.of(makeGroupSpec()), + AggregatorMode.INITIAL, + List.of( + new SumLongAggregatorFunctionSupplier(List.of(1)).groupingAggregatorFactory(AggregatorMode.INITIAL), + new MaxLongAggregatorFunctionSupplier(List.of(1)).groupingAggregatorFactory(AggregatorMode.INITIAL) + ), + 16 * 1024, + analysisRegistry + ).get(driverContext) + ), + new PageConsumerOperator(intermediateOutput::add), + () -> {} + ); + runDriver(driver); + + driver = new Driver( + driverContext, + new LocalSourceOperator(input2), + List.of( + new HashAggregationOperator.HashAggregationOperatorFactory( + List.of(makeGroupSpec()), + AggregatorMode.INITIAL, + List.of( + new SumLongAggregatorFunctionSupplier(List.of(1)).groupingAggregatorFactory(AggregatorMode.INITIAL), + new MaxLongAggregatorFunctionSupplier(List.of(1)).groupingAggregatorFactory(AggregatorMode.INITIAL) + ), + 16 * 1024, + analysisRegistry + ).get(driverContext) + ), + new PageConsumerOperator(intermediateOutput::add), + () -> {} + ); + runDriver(driver); + + List finalOutput = new ArrayList<>(); + + driver = new Driver( + driverContext, + new CannedSourceOperator(intermediateOutput.iterator()), + List.of( + new HashAggregationOperator.HashAggregationOperatorFactory( + List.of(makeGroupSpec()), + AggregatorMode.FINAL, + List.of( + new SumLongAggregatorFunctionSupplier(List.of(1, 2)).groupingAggregatorFactory(AggregatorMode.FINAL), + new MaxLongAggregatorFunctionSupplier(List.of(3, 4)).groupingAggregatorFactory(AggregatorMode.FINAL) + ), + 16 * 1024, + analysisRegistry + 
).get(driverContext) + ), + new PageConsumerOperator(finalOutput::add), + () -> {} + ); + runDriver(driver); + + assertThat(finalOutput, hasSize(1)); + assertThat(finalOutput.get(0).getBlockCount(), equalTo(3)); + BytesRefBlock outputTexts = finalOutput.get(0).getBlock(0); + LongBlock outputSums = finalOutput.get(0).getBlock(1); + LongBlock outputMaxs = finalOutput.get(0).getBlock(2); + assertThat(outputSums.getPositionCount(), equalTo(outputTexts.getPositionCount())); + assertThat(outputMaxs.getPositionCount(), equalTo(outputTexts.getPositionCount())); + Map sums = new HashMap<>(); + Map maxs = new HashMap<>(); + for (int i = 0; i < outputTexts.getPositionCount(); i++) { + sums.put(outputTexts.getBytesRef(i, new BytesRef()).utf8ToString(), outputSums.getLong(i)); + maxs.put(outputTexts.getBytesRef(i, new BytesRef()).utf8ToString(), outputMaxs.getLong(i)); + } + assertThat( + sums, + equalTo( + Map.of( + ".*?aaazz.*?", + 1L, + ".*?bbbzz.*?", + 2L, + ".*?ccczz.*?", + 33L, + ".*?dddzz.*?", + 44L, + ".*?eeezz.*?", + 5L, + ".*?words.+?words.+?words.+?goodbye.*?", + 8888L, + ".*?words.+?words.+?words.+?hello.*?", + 999L + ) + ) + ); + assertThat( + maxs, + equalTo( + Map.of( + ".*?aaazz.*?", + 1L, + ".*?bbbzz.*?", + 2L, + ".*?ccczz.*?", + 30L, + ".*?dddzz.*?", + 40L, + ".*?eeezz.*?", + 5L, + ".*?words.+?words.+?words.+?goodbye.*?", + 8000L, + ".*?words.+?words.+?words.+?hello.*?", + 900L + ) + ) + ); + Releasables.close(() -> Iterators.map(finalOutput.iterator(), (Page p) -> p::releaseBlocks)); + } + + private BlockHash.GroupSpec makeGroupSpec() { + return new BlockHash.GroupSpec(0, ElementType.BYTES_REF, true); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SerializationTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SerializationTestCase.java index e72c34fdb5f7a..d76e58d1c8a30 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SerializationTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SerializationTestCase.java @@ -29,7 +29,7 @@ public abstract class SerializationTestCase extends ESTestCase { BigArrays bigArrays; protected BlockFactory blockFactory; - NamedWriteableRegistry registry = new NamedWriteableRegistry(Block.getNamedWriteables()); + NamedWriteableRegistry registry = new NamedWriteableRegistry(BlockWritables.getNamedWriteables()); @Before public final void newBlockFactory() { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/BucketedSortTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/BucketedSortTestCase.java index f857f50b2d30f..339c2bba2a734 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/BucketedSortTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/sort/BucketedSortTestCase.java @@ -363,6 +363,52 @@ public final void testMergeEmptyToEmpty() { } } + public final void testMergeOtherBigger() { + try (T sort = build(SortOrder.ASC, 3)) { + var values = threeSortedValues(); + + collect(sort, values.get(0), 0); + collect(sort, values.get(1), 0); + collect(sort, values.get(2), 0); + + try (T other = build(SortOrder.ASC, 3)) { + collect(other, values.get(0), 0); + collect(other, values.get(1), 1); + collect(other, values.get(2), 2); + + merge(sort, 0, other, 0); + merge(sort, 0, other, 1); + merge(sort, 0, other, 2); + } + + assertBlock(sort, 0, 
List.of(values.get(0), values.get(0), values.get(1))); + } + } + + public final void testMergeThisBigger() { + try (T sort = build(SortOrder.ASC, 3)) { + var values = threeSortedValues(); + + collect(sort, values.get(0), 0); + collect(sort, values.get(1), 1); + collect(sort, values.get(2), 2); + + try (T other = build(SortOrder.ASC, 3)) { + collect(other, values.get(0), 0); + collect(other, values.get(1), 0); + collect(other, values.get(2), 0); + + merge(sort, 0, other, 0); + merge(sort, 1, other, 0); + merge(sort, 2, other, 0); + } + + assertBlock(sort, 0, List.of(values.get(0), values.get(0), values.get(1))); + assertBlock(sort, 1, List.of(values.get(0), values.get(1), values.get(1))); + assertBlock(sort, 2, values); + } + } + protected void assertBlock(T sort, int groupId, List values) { var blockFactory = TestBlockFactory.getNonBreakingInstance(); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxDoubleOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxDoubleOperatorTests.java new file mode 100644 index 0000000000000..4cb113457b23f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxDoubleOperatorTests.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.DoubleField; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.MaxDoubleAggregatorFunctionSupplier; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class LuceneMaxDoubleOperatorTests extends LuceneMaxOperatorTestCase { + + @Override + public LuceneMaxFactory.NumberType getNumberType() { + return LuceneMaxFactory.NumberType.DOUBLE; + } + + @Override + protected NumberTypeTest getNumberTypeTest() { + return new NumberTypeTest() { + + double max = -Double.MAX_VALUE; + + @Override + public IndexableField newPointField() { + return new DoubleField(FIELD_NAME, newValue(), randomFrom(Field.Store.values())); + } + + @Override + public IndexableField newDocValuesField() { + return new SortedNumericDocValuesField(FIELD_NAME, NumericUtils.doubleToSortableLong(newValue())); + } + + private double newValue() { + final double value = randomDoubleBetween(-Double.MAX_VALUE, Double.MAX_VALUE, true); + max = Math.max(max, value); + return value; + } + + @Override + public void assertPage(Page page) { + assertThat(page.getBlock(0), instanceOf(DoubleBlock.class)); + final DoubleBlock db = page.getBlock(0); + assertThat(page.getBlock(1), instanceOf(BooleanBlock.class)); + final BooleanBlock bb = page.getBlock(1); + if (bb.getBoolean(0) == false) { + 
assertThat(db.getDouble(0), equalTo(-Double.MAX_VALUE)); + } else { + assertThat(db.getDouble(0), lessThanOrEqualTo(max)); + } + } + + @Override + public AggregatorFunction newAggregatorFunction(DriverContext context) { + return new MaxDoubleAggregatorFunctionSupplier(List.of(0, 1)).aggregator(context); + } + + @Override + public void assertMaxValue(Block block, boolean exactResult) { + assertThat(block, instanceOf(DoubleBlock.class)); + final DoubleBlock db = (DoubleBlock) block; + if (exactResult) { + assertThat(db.getDouble(0), equalTo(max)); + } else { + assertThat(db.getDouble(0), lessThanOrEqualTo(max)); + } + } + }; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxFloatOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxFloatOperatorTests.java new file mode 100644 index 0000000000000..4a009a2d84c66 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxFloatOperatorTests.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FloatField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.MaxFloatAggregatorFunctionSupplier; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class LuceneMaxFloatOperatorTests extends LuceneMaxOperatorTestCase { + + @Override + public LuceneMaxFactory.NumberType getNumberType() { + return LuceneMaxFactory.NumberType.FLOAT; + } + + @Override + protected NumberTypeTest getNumberTypeTest() { + return new NumberTypeTest() { + + float max = -Float.MAX_VALUE; + + @Override + public IndexableField newPointField() { + return new FloatField(FIELD_NAME, newValue(), randomFrom(Field.Store.values())); + } + + private float newValue() { + final float value = randomFloatBetween(-Float.MAX_VALUE, Float.MAX_VALUE, true); + max = Math.max(max, value); + return value; + } + + @Override + public IndexableField newDocValuesField() { + return new SortedNumericDocValuesField(FIELD_NAME, NumericUtils.floatToSortableInt(newValue())); + } + + @Override + public void assertPage(Page page) { + assertThat(page.getBlock(0), instanceOf(FloatBlock.class)); + final FloatBlock db = page.getBlock(0); + assertThat(page.getBlock(1), instanceOf(BooleanBlock.class)); + final BooleanBlock bb = page.getBlock(1); + if (bb.getBoolean(0) == false) { + assertThat(db.getFloat(0), equalTo(-Float.MAX_VALUE)); + } else { + assertThat(db.getFloat(0), lessThanOrEqualTo(max)); + } + } + + @Override + public AggregatorFunction newAggregatorFunction(DriverContext context) { + return new 
MaxFloatAggregatorFunctionSupplier(List.of(0, 1)).aggregator(context); + } + + @Override + public void assertMaxValue(Block block, boolean exactResult) { + assertThat(block, instanceOf(FloatBlock.class)); + final FloatBlock fb = (FloatBlock) block; + if (exactResult) { + assertThat(fb.getFloat(0), equalTo(max)); + } else { + assertThat(fb.getFloat(0), lessThanOrEqualTo(max)); + } + } + }; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxIntOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxIntOperatorTests.java new file mode 100644 index 0000000000000..a6118481ca43d --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxIntOperatorTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.IntField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexableField; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.MaxIntAggregatorFunctionSupplier; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class LuceneMaxIntOperatorTests extends LuceneMaxOperatorTestCase { + + @Override + public LuceneMaxFactory.NumberType getNumberType() { + return LuceneMaxFactory.NumberType.INTEGER; + } + + @Override + protected NumberTypeTest getNumberTypeTest() { + return new NumberTypeTest() { + + int max = Integer.MIN_VALUE; + + @Override + public IndexableField newPointField() { + return new IntField(FIELD_NAME, newValue(), randomFrom(Field.Store.values())); + } + + private int newValue() { + final int value = randomInt(); + max = Math.max(max, value); + return value; + } + + @Override + public IndexableField newDocValuesField() { + return new SortedNumericDocValuesField(FIELD_NAME, newValue()); + } + + @Override + public void assertPage(Page page) { + assertThat(page.getBlock(0), instanceOf(IntBlock.class)); + final IntBlock db = page.getBlock(0); + assertThat(page.getBlock(1), instanceOf(BooleanBlock.class)); + final BooleanBlock bb = page.getBlock(1); + if (bb.getBoolean(0) == false) { + assertThat(db.getInt(0), equalTo(Integer.MIN_VALUE)); + } else { + assertThat(db.getInt(0), lessThanOrEqualTo(max)); + } + } + + @Override + public AggregatorFunction newAggregatorFunction(DriverContext context) { + return new MaxIntAggregatorFunctionSupplier(List.of(0, 1)).aggregator(context); + } + + @Override + public void assertMaxValue(Block block, boolean exactResult) { + assertThat(block, instanceOf(IntBlock.class)); + final IntBlock ib = (IntBlock) block; + if (exactResult) { + assertThat(ib.getInt(0), equalTo(max)); + } else { + assertThat(ib.getInt(0), lessThanOrEqualTo(max)); + } + } + }; + } +} diff --git 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxLongOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxLongOperatorTests.java new file mode 100644 index 0000000000000..894c8e862123e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxLongOperatorTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.LongField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexableField; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.MaxLongAggregatorFunctionSupplier; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class LuceneMaxLongOperatorTests extends LuceneMaxOperatorTestCase { + + @Override + public LuceneMaxFactory.NumberType getNumberType() { + return LuceneMaxFactory.NumberType.LONG; + } + + @Override + protected NumberTypeTest getNumberTypeTest() { + return new NumberTypeTest() { + + long max = Long.MIN_VALUE; + + @Override + public IndexableField newPointField() { + return new LongField(FIELD_NAME, newValue(), randomFrom(Field.Store.values())); + } + + @Override + public IndexableField newDocValuesField() { + return new SortedNumericDocValuesField(FIELD_NAME, newValue()); + } + + private long newValue() { + final long value = randomLong(); + max = Math.max(max, value); + return value; + } + + @Override + public void assertPage(Page page) { + assertThat(page.getBlock(0), instanceOf(LongBlock.class)); + final LongBlock db = page.getBlock(0); + assertThat(page.getBlock(1), instanceOf(BooleanBlock.class)); + final BooleanBlock bb = page.getBlock(1); + if (bb.getBoolean(0) == false) { + assertThat(db.getLong(0), equalTo(Long.MIN_VALUE)); + } else { + assertThat(db.getLong(0), lessThanOrEqualTo(max)); + } + } + + @Override + public AggregatorFunction newAggregatorFunction(DriverContext context) { + return new MaxLongAggregatorFunctionSupplier(List.of(0, 1)).aggregator(context); + } + + @Override + public void assertMaxValue(Block block, boolean exactResult) { + assertThat(block, instanceOf(LongBlock.class)); + final LongBlock lb = (LongBlock) block; + if (exactResult) { + assertThat(lb.getLong(0), equalTo(max)); + } else { + assertThat(lb.getLong(0), lessThanOrEqualTo(max)); + } + } + }; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxOperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxOperatorTestCase.java new file mode 100644 index 0000000000000..f5214dccbd00c --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMaxOperatorTestCase.java @@ -0,0 +1,210 @@ +/* + 
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.AnyOperatorTestCase; +import org.elasticsearch.compute.operator.Driver; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.OperatorTestCase; +import org.elasticsearch.compute.operator.TestResultPageSinkOperator; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.indices.CrankyCircuitBreakerService; +import org.hamcrest.Matcher; +import org.junit.After; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.matchesRegex; + +public abstract class LuceneMaxOperatorTestCase extends AnyOperatorTestCase { + + protected interface NumberTypeTest { + + IndexableField newPointField(); + + IndexableField newDocValuesField(); + + void assertPage(Page page); + + AggregatorFunction newAggregatorFunction(DriverContext context); + + void assertMaxValue(Block block, boolean exactResult); + + } + + protected abstract NumberTypeTest getNumberTypeTest(); + + protected abstract LuceneMaxFactory.NumberType getNumberType(); + + protected static final String FIELD_NAME = "field"; + private final Directory directory = newDirectory(); + private IndexReader reader; + + @After + public void closeIndex() throws IOException { + IOUtils.close(reader, directory); + } + + @Override + protected LuceneMaxFactory simple() { + return simple(getNumberTypeTest(), randomFrom(DataPartitioning.values()), between(1, 10_000), 100); + } + + private LuceneMaxFactory simple(NumberTypeTest numberTypeTest, DataPartitioning dataPartitioning, int numDocs, int limit) { + final boolean enableShortcut = randomBoolean(); + final boolean enableMultiValue = randomBoolean(); + final int commitEvery = Math.max(1, numDocs / 10); + try ( + RandomIndexWriter writer = new RandomIndexWriter( + random(), + directory, + newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + + for (int d = 0; d < numDocs; d++) { + final var numValues = enableMultiValue ? 
randomIntBetween(1, 5) : 1; + final var doc = new Document(); + for (int i = 0; i < numValues; i++) { + if (enableShortcut) { + doc.add(numberTypeTest.newPointField()); + } else { + doc.add(numberTypeTest.newDocValuesField()); + } + } + writer.addDocument(doc); + if (d % commitEvery == 0) { + writer.commit(); + } + } + reader = writer.getReader(); + } catch (IOException e) { + throw new RuntimeException(e); + } + + final ShardContext ctx = new LuceneSourceOperatorTests.MockShardContext(reader, 0); + final Query query; + if (enableShortcut && randomBoolean()) { + query = new MatchAllDocsQuery(); + } else { + query = SortedNumericDocValuesField.newSlowRangeQuery(FIELD_NAME, Long.MIN_VALUE, Long.MAX_VALUE); + } + return new LuceneMaxFactory(List.of(ctx), c -> query, dataPartitioning, between(1, 8), FIELD_NAME, getNumberType(), limit); + } + + public void testSimple() { + testSimple(this::driverContext); + } + + public void testSimpleWithCranky() { + try { + testSimple(this::crankyDriverContext); + logger.info("cranky didn't break"); + } catch (CircuitBreakingException e) { + logger.info("broken", e); + assertThat(e.getMessage(), equalTo(CrankyCircuitBreakerService.ERROR_MESSAGE)); + } + } + + private void testSimple(Supplier<DriverContext> contexts) { + int size = between(1_000, 20_000); + int limit = randomBoolean() ? between(10, size) : Integer.MAX_VALUE; + testMax(contexts, size, limit); + } + + public void testEmpty() { + testEmpty(this::driverContext); + } + + public void testEmptyWithCranky() { + try { + testEmpty(this::crankyDriverContext); + logger.info("cranky didn't break"); + } catch (CircuitBreakingException e) { + logger.info("broken", e); + assertThat(e.getMessage(), equalTo(CrankyCircuitBreakerService.ERROR_MESSAGE)); + } + } + + private void testEmpty(Supplier<DriverContext> contexts) { + int limit = randomBoolean() ?
between(10, 10000) : Integer.MAX_VALUE; + testMax(contexts, 0, limit); + } + + private void testMax(Supplier<DriverContext> contexts, int size, int limit) { + DataPartitioning dataPartitioning = randomFrom(DataPartitioning.values()); + NumberTypeTest numberTypeTest = getNumberTypeTest(); + LuceneMaxFactory factory = simple(numberTypeTest, dataPartitioning, size, limit); + List<Page> results = new CopyOnWriteArrayList<>(); + List<Driver> drivers = new ArrayList<>(); + int taskConcurrency = between(1, 8); + for (int i = 0; i < taskConcurrency; i++) { + DriverContext ctx = contexts.get(); + drivers.add(new Driver(ctx, factory.get(ctx), List.of(), new TestResultPageSinkOperator(results::add), () -> {})); + } + OperatorTestCase.runDriver(drivers); + assertThat(results.size(), lessThanOrEqualTo(taskConcurrency)); + + try (AggregatorFunction aggregatorFunction = numberTypeTest.newAggregatorFunction(contexts.get())) { + for (Page page : results) { + assertThat(page.getPositionCount(), is(1)); // one row + assertThat(page.getBlockCount(), is(2)); // two blocks + numberTypeTest.assertPage(page); + aggregatorFunction.addIntermediateInput(page); + } + + final Block[] result = new Block[1]; + try { + aggregatorFunction.evaluateFinal(result, 0, contexts.get()); + if (result[0].areAllValuesNull() == false) { + boolean exactResult = size <= limit; + numberTypeTest.assertMaxValue(result[0], exactResult); + } + } finally { + Releasables.close(result); + } + } + } + + @Override + protected final Matcher<String> expectedToStringOfSimple() { + return matchesRegex("LuceneMinMaxOperator\\[maxPageSize = \\d+, remainingDocs=100]"); + } + + @Override + protected final Matcher<String> expectedDescriptionOfSimple() { + return matchesRegex( + "LuceneMaxOperator\\[type = " + + getNumberType().name() + + ", dataPartitioning = (DOC|SHARD|SEGMENT), fieldName = " + + FIELD_NAME + + ", limit = 100]" + ); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinDoubleOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinDoubleOperatorTests.java new file mode 100644 index 0000000000000..5fef2d4897030 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinDoubleOperatorTests.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.DoubleField; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.MinDoubleAggregatorFunctionSupplier; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; + +public class LuceneMinDoubleOperatorTests extends LuceneMinOperatorTestCase { + + @Override + public LuceneMinFactory.NumberType getNumberType() { + return LuceneMinFactory.NumberType.DOUBLE; + } + + @Override + protected NumberTypeTest getNumberTypeTest() { + return new NumberTypeTest() { + + double min = Double.MAX_VALUE; + + @Override + public IndexableField newPointField() { + return new DoubleField(FIELD_NAME, newValue(), randomFrom(Field.Store.values())); + } + + @Override + public IndexableField newDocValuesField() { + return new SortedNumericDocValuesField(FIELD_NAME, NumericUtils.doubleToSortableLong(newValue())); + } + + private double newValue() { + final double value = randomDoubleBetween(-Double.MAX_VALUE, Double.MAX_VALUE, true); + min = Math.min(min, value); + return value; + } + + @Override + public void assertPage(Page page) { + assertThat(page.getBlock(0), instanceOf(DoubleBlock.class)); + final DoubleBlock db = page.getBlock(0); + assertThat(page.getBlock(1), instanceOf(BooleanBlock.class)); + final BooleanBlock bb = page.getBlock(1); + if (bb.getBoolean(0) == false) { + assertThat(db.getDouble(0), equalTo(Double.POSITIVE_INFINITY)); + } else { + assertThat(db.getDouble(0), greaterThanOrEqualTo(min)); + } + } + + @Override + public AggregatorFunction newAggregatorFunction(DriverContext context) { + return new MinDoubleAggregatorFunctionSupplier(List.of(0, 1)).aggregator(context); + } + + @Override + public void assertMinValue(Block block, boolean exactResult) { + assertThat(block, instanceOf(DoubleBlock.class)); + final DoubleBlock db = (DoubleBlock) block; + if (exactResult) { + assertThat(db.getDouble(0), equalTo(min)); + } else { + assertThat(db.getDouble(0), greaterThanOrEqualTo(min)); + } + } + }; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinFloatOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinFloatOperatorTests.java new file mode 100644 index 0000000000000..41c8751c08a96 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinFloatOperatorTests.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FloatField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.MinFloatAggregatorFunctionSupplier; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; + +public class LuceneMinFloatOperatorTests extends LuceneMinOperatorTestCase { + + @Override + public LuceneMinFactory.NumberType getNumberType() { + return LuceneMinFactory.NumberType.FLOAT; + } + + @Override + protected NumberTypeTest getNumberTypeTest() { + return new NumberTypeTest() { + + float min = Float.MAX_VALUE; + + @Override + public IndexableField newPointField() { + return new FloatField(FIELD_NAME, newValue(), randomFrom(Field.Store.values())); + } + + @Override + public IndexableField newDocValuesField() { + return new SortedNumericDocValuesField(FIELD_NAME, NumericUtils.floatToSortableInt(newValue())); + } + + private float newValue() { + final float value = randomFloatBetween(-Float.MAX_VALUE, Float.MAX_VALUE, true); + min = Math.min(min, value); + return value; + } + + @Override + public void assertPage(Page page) { + assertThat(page.getBlock(0), instanceOf(FloatBlock.class)); + final FloatBlock db = page.getBlock(0); + assertThat(page.getBlock(1), instanceOf(BooleanBlock.class)); + final BooleanBlock bb = page.getBlock(1); + final float v = db.getFloat(0); + if (bb.getBoolean(0) == false) { + assertThat(v, equalTo(Float.POSITIVE_INFINITY)); + } else { + assertThat(v, greaterThanOrEqualTo(min)); + } + } + + @Override + public AggregatorFunction newAggregatorFunction(DriverContext context) { + return new MinFloatAggregatorFunctionSupplier(List.of(0, 1)).aggregator(context); + } + + @Override + public void assertMinValue(Block block, boolean exactResult) { + assertThat(block, instanceOf(FloatBlock.class)); + final FloatBlock fb = (FloatBlock) block; + if (exactResult) { + assertThat(fb.getFloat(0), equalTo(min)); + } else { + assertThat(fb.getFloat(0), greaterThanOrEqualTo(min)); + } + } + }; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinIntegerOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinIntegerOperatorTests.java new file mode 100644 index 0000000000000..5d2c867f4f660 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinIntegerOperatorTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.IntField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexableField; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.MinIntAggregatorFunctionSupplier; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; + +public class LuceneMinIntegerOperatorTests extends LuceneMinOperatorTestCase { + + @Override + public LuceneMinFactory.NumberType getNumberType() { + return LuceneMinFactory.NumberType.INTEGER; + } + + @Override + protected NumberTypeTest getNumberTypeTest() { + return new NumberTypeTest() { + + int min = Integer.MAX_VALUE; + + @Override + public IndexableField newPointField() { + return new IntField(FIELD_NAME, newValue(), randomFrom(Field.Store.values())); + } + + @Override + public IndexableField newDocValuesField() { + return new SortedNumericDocValuesField(FIELD_NAME, newValue()); + } + + private int newValue() { + final int value = randomInt(); + min = Math.min(min, value); + return value; + } + + @Override + public void assertPage(Page page) { + assertThat(page.getBlock(0), instanceOf(IntBlock.class)); + IntBlock db = page.getBlock(0); + assertThat(page.getBlock(1), instanceOf(BooleanBlock.class)); + final BooleanBlock bb = page.getBlock(1); + if (bb.getBoolean(0) == false) { + assertThat(db.getInt(0), equalTo(Integer.MAX_VALUE)); + } else { + assertThat(db.getInt(0), greaterThanOrEqualTo(min)); + } + } + + @Override + public AggregatorFunction newAggregatorFunction(DriverContext context) { + return new MinIntAggregatorFunctionSupplier(List.of(0, 1)).aggregator(context); + } + + @Override + public void assertMinValue(Block block, boolean exactResult) { + assertThat(block, instanceOf(IntBlock.class)); + final IntBlock ib = (IntBlock) block; + if (exactResult) { + assertThat(ib.getInt(0), equalTo(min)); + } else { + assertThat(ib.getInt(0), greaterThanOrEqualTo(min)); + } + } + }; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinLongOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinLongOperatorTests.java new file mode 100644 index 0000000000000..15c34f5853ae2 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinLongOperatorTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.LongField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexableField; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.MinLongAggregatorFunctionSupplier; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; + +public class LuceneMinLongOperatorTests extends LuceneMinOperatorTestCase { + + @Override + public LuceneMinFactory.NumberType getNumberType() { + return LuceneMinFactory.NumberType.LONG; + } + + @Override + protected NumberTypeTest getNumberTypeTest() { + return new NumberTypeTest() { + + long min = Long.MAX_VALUE; + + @Override + public IndexableField newPointField() { + return new LongField(FIELD_NAME, newValue(), randomFrom(Field.Store.values())); + } + + @Override + public IndexableField newDocValuesField() { + return new SortedNumericDocValuesField(FIELD_NAME, newValue()); + } + + private long newValue() { + final long value = randomLong(); + min = Math.min(min, value); + return value; + } + + @Override + public void assertPage(Page page) { + assertThat(page.getBlock(0), instanceOf(LongBlock.class)); + final LongBlock db = page.getBlock(0); + assertThat(page.getBlock(1), instanceOf(BooleanBlock.class)); + final BooleanBlock bb = page.getBlock(1); + if (bb.getBoolean(0) == false) { + assertThat(db.getLong(0), equalTo(Long.MAX_VALUE)); + } else { + assertThat(db.getLong(0), greaterThanOrEqualTo(min)); + } + } + + @Override + public AggregatorFunction newAggregatorFunction(DriverContext context) { + return new MinLongAggregatorFunctionSupplier(List.of(0, 1)).aggregator(context); + } + + @Override + public void assertMinValue(Block block, boolean exactResult) { + assertThat(block, instanceOf(LongBlock.class)); + final LongBlock lb = (LongBlock) block; + if (exactResult) { + assertThat(lb.getLong(0), equalTo(min)); + } else { + assertThat(lb.getLong(0), greaterThanOrEqualTo(min)); + } + } + }; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinOperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinOperatorTestCase.java new file mode 100644 index 0000000000000..493512bd83bec --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneMinOperatorTestCase.java @@ -0,0 +1,210 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.AnyOperatorTestCase; +import org.elasticsearch.compute.operator.Driver; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.OperatorTestCase; +import org.elasticsearch.compute.operator.TestResultPageSinkOperator; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.indices.CrankyCircuitBreakerService; +import org.hamcrest.Matcher; +import org.junit.After; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.matchesRegex; + +public abstract class LuceneMinOperatorTestCase extends AnyOperatorTestCase { + + protected interface NumberTypeTest { + + IndexableField newPointField(); + + IndexableField newDocValuesField(); + + void assertPage(Page page); + + AggregatorFunction newAggregatorFunction(DriverContext context); + + void assertMinValue(Block block, boolean exactResult); + + } + + protected abstract NumberTypeTest getNumberTypeTest(); + + protected abstract LuceneMinFactory.NumberType getNumberType(); + + protected static final String FIELD_NAME = "field"; + private final Directory directory = newDirectory(); + private IndexReader reader; + + @After + public void closeIndex() throws IOException { + IOUtils.close(reader, directory); + } + + @Override + protected LuceneMinFactory simple() { + return simple(getNumberTypeTest(), randomFrom(DataPartitioning.values()), between(1, 10_000), 100); + } + + private LuceneMinFactory simple(NumberTypeTest numberTypeTest, DataPartitioning dataPartitioning, int numDocs, int limit) { + final boolean enableShortcut = randomBoolean(); + final boolean enableMultiValue = randomBoolean(); + final int commitEvery = Math.max(1, numDocs / 10); + try ( + RandomIndexWriter writer = new RandomIndexWriter( + random(), + directory, + newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + + for (int d = 0; d < numDocs; d++) { + final var numValues = enableMultiValue ? 
randomIntBetween(1, 5) : 1; + final var doc = new Document(); + for (int i = 0; i < numValues; i++) { + if (enableShortcut) { + doc.add(numberTypeTest.newPointField()); + } else { + doc.add(numberTypeTest.newDocValuesField()); + } + } + writer.addDocument(doc); + if (d % commitEvery == 0) { + writer.commit(); + } + } + reader = writer.getReader(); + } catch (IOException e) { + throw new RuntimeException(e); + } + + final ShardContext ctx = new LuceneSourceOperatorTests.MockShardContext(reader, 0); + final Query query; + if (enableShortcut && randomBoolean()) { + query = new MatchAllDocsQuery(); + } else { + query = SortedNumericDocValuesField.newSlowRangeQuery(FIELD_NAME, Long.MIN_VALUE, Long.MAX_VALUE); + } + return new LuceneMinFactory(List.of(ctx), c -> query, dataPartitioning, between(1, 8), FIELD_NAME, getNumberType(), limit); + } + + public void testSimple() { + testSimple(this::driverContext); + } + + public void testSimpleWithCranky() { + try { + testSimple(this::crankyDriverContext); + logger.info("cranky didn't break"); + } catch (CircuitBreakingException e) { + logger.info("broken", e); + assertThat(e.getMessage(), equalTo(CrankyCircuitBreakerService.ERROR_MESSAGE)); + } + } + + private void testSimple(Supplier<DriverContext> contexts) { + int size = between(1_000, 20_000); + int limit = randomBoolean() ? between(10, size) : Integer.MAX_VALUE; + testMin(contexts, size, limit); + } + + public void testEmpty() { + testEmpty(this::driverContext); + } + + public void testEmptyWithCranky() { + try { + testEmpty(this::crankyDriverContext); + logger.info("cranky didn't break"); + } catch (CircuitBreakingException e) { + logger.info("broken", e); + assertThat(e.getMessage(), equalTo(CrankyCircuitBreakerService.ERROR_MESSAGE)); + } + } + + private void testEmpty(Supplier<DriverContext> contexts) { + int limit = randomBoolean() ?
between(10, 10000) : Integer.MAX_VALUE; + testMin(contexts, 0, limit); + } + + private void testMin(Supplier<DriverContext> contexts, int size, int limit) { + DataPartitioning dataPartitioning = randomFrom(DataPartitioning.values()); + NumberTypeTest numberTypeTest = getNumberTypeTest(); + LuceneMinFactory factory = simple(numberTypeTest, dataPartitioning, size, limit); + List<Page> results = new CopyOnWriteArrayList<>(); + List<Driver> drivers = new ArrayList<>(); + int taskConcurrency = between(1, 8); + for (int i = 0; i < taskConcurrency; i++) { + DriverContext ctx = contexts.get(); + drivers.add(new Driver(ctx, factory.get(ctx), List.of(), new TestResultPageSinkOperator(results::add), () -> {})); + } + OperatorTestCase.runDriver(drivers); + assertThat(results.size(), lessThanOrEqualTo(taskConcurrency)); + + try (AggregatorFunction aggregatorFunction = numberTypeTest.newAggregatorFunction(contexts.get())) { + for (Page page : results) { + assertThat(page.getPositionCount(), is(1)); // one row + assertThat(page.getBlockCount(), is(2)); // two blocks + numberTypeTest.assertPage(page); + aggregatorFunction.addIntermediateInput(page); + } + + final Block[] result = new Block[1]; + try { + aggregatorFunction.evaluateFinal(result, 0, contexts.get()); + if (result[0].areAllValuesNull() == false) { + boolean exactResult = size <= limit; + numberTypeTest.assertMinValue(result[0], exactResult); + } + } finally { + Releasables.close(result); + } + } + } + + @Override + protected final Matcher<String> expectedToStringOfSimple() { + return matchesRegex("LuceneMinMaxOperator\\[maxPageSize = \\d+, remainingDocs=100]"); + } + + @Override + protected final Matcher<String> expectedDescriptionOfSimple() { + return matchesRegex( + "LuceneMinOperator\\[type = " + + getNumberType().name() + + ", dataPartitioning = (DOC|SHARD|SEGMENT), fieldName = " + + FIELD_NAME + + ", limit = 100]" + ); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneQueryExpressionEvaluatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneQueryExpressionEvaluatorTests.java index beca522878358..ffaee536b443e 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneQueryExpressionEvaluatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneQueryExpressionEvaluatorTests.java @@ -27,6 +27,8 @@ import org.elasticsearch.compute.data.BooleanVector; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DocBlock; +import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.lucene.LuceneQueryExpressionEvaluator.DenseCollector; @@ -120,8 +122,9 @@ public void testTermQueryShuffled() throws IOException { private void assertTermQuery(String term, List<Page> results) { int matchCount = 0; for (Page page : results) { - BytesRefVector terms = page.<BytesRefBlock>getBlock(1).asVector(); - BooleanVector matches = page.<BooleanBlock>getBlock(2).asVector(); + int initialBlockIndex = initialBlockIndex(page); + BytesRefVector terms = page.<BytesRefBlock>getBlock(initialBlockIndex).asVector(); + BooleanVector matches = page.<BooleanBlock>getBlock(initialBlockIndex + 1).asVector(); for (int i = 0; i < page.getPositionCount(); i++) { BytesRef termAtPosition = terms.getBytesRef(i, new BytesRef()); assertThat(matches.getBoolean(i), equalTo(termAtPosition.utf8ToString().equals(term))); @@
-155,8 +158,9 @@ private void testTermsQuery(boolean shuffleDocs) throws IOException { List<Page> results = runQuery(values, new TermInSetQuery(MultiTermQuery.CONSTANT_SCORE_REWRITE, FIELD, matchingBytes), shuffleDocs); int matchCount = 0; for (Page page : results) { - BytesRefVector terms = page.<BytesRefBlock>getBlock(1).asVector(); - BooleanVector matches = page.<BooleanBlock>getBlock(2).asVector(); + int initialBlockIndex = initialBlockIndex(page); + BytesRefVector terms = page.<BytesRefBlock>getBlock(initialBlockIndex).asVector(); + BooleanVector matches = page.<BooleanBlock>getBlock(initialBlockIndex + 1).asVector(); for (int i = 0; i < page.getPositionCount(); i++) { BytesRef termAtPosition = terms.getBytesRef(i, new BytesRef()); assertThat(matches.getBoolean(i), equalTo(matching.contains(termAtPosition.utf8ToString()))); @@ -207,7 +211,7 @@ private List<Page> runQuery(Set<String> values, Query query, boolean shuffleDocs List<Page> results = new ArrayList<>(); Driver driver = new Driver( driverContext, - luceneOperatorFactory(reader, new MatchAllDocsQuery(), LuceneOperator.NO_LIMIT).get(driverContext), + luceneOperatorFactory(reader, new MatchAllDocsQuery(), LuceneOperator.NO_LIMIT, scoring).get(driverContext), operators, new TestResultPageSinkOperator(results::add), () -> {} @@ -248,7 +252,21 @@ private DriverContext driverContext() { return new DriverContext(blockFactory.bigArrays(), blockFactory); } - static LuceneOperator.Factory luceneOperatorFactory(IndexReader reader, Query query, int limit) { + // Scores are not interesting to this test, but enabled conditionally and effectively ignored just for coverage. + private final boolean scoring = randomBoolean(); + + // Returns the initial block index, ignoring the score block if scoring is enabled + private int initialBlockIndex(Page page) { + assert page.getBlock(0) instanceof DocBlock : "expected doc block at index 0"; + if (scoring) { + assert page.getBlock(1) instanceof DoubleBlock : "expected double block at index 1"; + return 2; + } else { + return 1; + } + } + + static LuceneOperator.Factory luceneOperatorFactory(IndexReader reader, Query query, int limit, boolean scoring) { final ShardContext searchContext = new LuceneSourceOperatorTests.MockShardContext(reader, 0); return new LuceneSourceOperator.Factory( List.of(searchContext), @@ -256,7 +274,8 @@ static LuceneOperator.Factory luceneOperatorFactory(IndexReader reader, Query qu randomFrom(DataPartitioning.values()), randomIntBetween(1, 10), randomPageSize(), - limit + limit, + scoring ); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorTests.java index 626190c04c501..2dcc5e20d3f98 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorTests.java @@ -17,6 +17,8 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.compute.data.DocBlock; +import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; @@ -63,10 +65,10 @@ public void closeIndex() throws IOException { @Override protected LuceneSourceOperator.Factory simple() { - return
simple(randomFrom(DataPartitioning.values()), between(1, 10_000), 100); + return simple(randomFrom(DataPartitioning.values()), between(1, 10_000), 100, scoring); } - private LuceneSourceOperator.Factory simple(DataPartitioning dataPartitioning, int numDocs, int limit) { + private LuceneSourceOperator.Factory simple(DataPartitioning dataPartitioning, int numDocs, int limit, boolean scoring) { int commitEvery = Math.max(1, numDocs / 10); try ( RandomIndexWriter writer = new RandomIndexWriter( @@ -91,7 +93,7 @@ private LuceneSourceOperator.Factory simple(DataPartitioning dataPartitioning, i ShardContext ctx = new MockShardContext(reader, 0); Function<ShardContext, Query> queryFunction = c -> new MatchAllDocsQuery(); int maxPageSize = between(10, Math.max(10, numDocs)); - return new LuceneSourceOperator.Factory(List.of(ctx), queryFunction, dataPartitioning, 1, maxPageSize, limit); + return new LuceneSourceOperator.Factory(List.of(ctx), queryFunction, dataPartitioning, 1, maxPageSize, limit, scoring); } @Override @@ -101,7 +103,10 @@ protected Matcher<String> expectedToStringOfSimple() { @Override protected Matcher<String> expectedDescriptionOfSimple() { - return matchesRegex("LuceneSourceOperator\\[dataPartitioning = (DOC|SHARD|SEGMENT), maxPageSize = \\d+, limit = 100]"); + return matchesRegex( + "LuceneSourceOperator" + + "\\[dataPartitioning = (DOC|SHARD|SEGMENT), maxPageSize = \\d+, limit = 100, scoreMode = (COMPLETE|COMPLETE_NO_SCORES)]" + ); } // TODO tests for the other data partitioning configurations @@ -149,7 +154,7 @@ public void testShardDataPartitioningWithCranky() { } private void testSimple(DriverContext ctx, int size, int limit) { - LuceneSourceOperator.Factory factory = simple(DataPartitioning.SHARD, size, limit); + LuceneSourceOperator.Factory factory = simple(DataPartitioning.SHARD, size, limit, scoring); Operator.OperatorFactory readS = ValuesSourceReaderOperatorTests.factory(reader, S_FIELD, ElementType.LONG); List<Page> results = new ArrayList<>(); @@ -164,7 +169,7 @@ private void testSimple(DriverContext ctx, int size, int limit) { } for (Page page : results) { - LongBlock sBlock = page.getBlock(1); + LongBlock sBlock = page.getBlock(initialBlockIndex(page)); for (int p = 0; p < page.getPositionCount(); p++) { assertThat(sBlock.getLong(sBlock.getFirstValueIndex(p)), both(greaterThanOrEqualTo(0L)).and(lessThan((long) size))); } @@ -174,6 +179,20 @@ private void testSimple(DriverContext ctx, int size, int limit) { assertThat(results, hasSize(both(greaterThanOrEqualTo(minPages)).and(lessThanOrEqualTo(maxPages)))); } + // Scores are not interesting to this test, but enabled conditionally and effectively ignored just for coverage. + private final boolean scoring = randomBoolean(); + + // Returns the initial block index, ignoring the score block if scoring is enabled + private int initialBlockIndex(Page page) { + assert page.getBlock(0) instanceof DocBlock : "expected doc block at index 0"; + if (scoring) { + assert page.getBlock(1) instanceof DoubleBlock : "expected double block at index 1"; + return 2; + } else { + return 1; + } + } + /** * Creates a mock search context with the given index reader. * The returned mock search context can be used to test with {@link LuceneOperator}.
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorScoringTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorScoringTests.java new file mode 100644 index 0000000000000..a0fa1c2c01c0a --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorScoringTests.java @@ -0,0 +1,151 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedNumericSelector; +import org.apache.lucene.search.SortedNumericSortField; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.Driver; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.compute.operator.OperatorTestCase; +import org.elasticsearch.compute.operator.TestResultPageSinkOperator; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.sort.FieldSortBuilder; +import org.elasticsearch.search.sort.SortAndFormats; +import org.elasticsearch.search.sort.SortBuilder; +import org.hamcrest.Matcher; +import org.junit.After; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.function.Function; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.matchesRegex; + +public class LuceneTopNSourceOperatorScoringTests extends LuceneTopNSourceOperatorTests { + private static final MappedFieldType S_FIELD = new NumberFieldMapper.NumberFieldType("s", NumberFieldMapper.NumberType.LONG); + private Directory directory = newDirectory(); + private IndexReader reader; + + @After + private void closeIndex() throws IOException { + IOUtils.close(reader, directory); + } + + @Override + protected LuceneTopNSourceOperator.Factory simple() { + return simple(DataPartitioning.SHARD, 10_000, 100); + } + + private LuceneTopNSourceOperator.Factory simple(DataPartitioning dataPartitioning, int size, int limit) { + int commitEvery = Math.max(1, size / 10); + try ( + RandomIndexWriter writer = new RandomIndexWriter( + random(), + directory, + newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + for (int d = 0; d < size; d++) { + List<IndexableField> doc = new ArrayList<>(); + doc.add(new SortedNumericDocValuesField("s", d)); + writer.addDocument(doc); + if (d % commitEvery == 0) { + writer.commit(); + } + }
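+ // Editorial note, not part of the upstream change: committing every commitEvery documents while merging is disabled via NoMergePolicy leaves the index split across several segments rather than one merged segment, so the reader below is a realistic multi-segment reader.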
+ reader = writer.getReader(); + } catch (IOException e) { + throw new RuntimeException(e); + } + + ShardContext ctx = new LuceneSourceOperatorTests.MockShardContext(reader, 0) { + @Override + public Optional<SortAndFormats> buildSort(List<SortBuilder<?>> sorts) { + SortField field = new SortedNumericSortField("s", SortField.Type.LONG, false, SortedNumericSelector.Type.MIN); + return Optional.of(new SortAndFormats(new Sort(field), new DocValueFormat[] { null })); + } + }; + Function<ShardContext, Query> queryFunction = c -> new MatchAllDocsQuery(); + int taskConcurrency = 0; + int maxPageSize = between(10, Math.max(10, size)); + List<SortBuilder<?>> sorts = List.of(new FieldSortBuilder("s")); + return new LuceneTopNSourceOperator.Factory( + List.of(ctx), + queryFunction, + dataPartitioning, + taskConcurrency, + maxPageSize, + limit, + sorts, + true // scoring + ); + } + + @Override + protected Matcher<String> expectedToStringOfSimple() { + return matchesRegex("LuceneTopNSourceOperator\\[maxPageSize = \\d+, limit = 100, scoreMode = COMPLETE, sorts = \\[\\{.+}]]"); + } + + @Override + protected Matcher<String> expectedDescriptionOfSimple() { + return matchesRegex( + "LuceneTopNSourceOperator" + + "\\[dataPartitioning = (DOC|SHARD|SEGMENT), maxPageSize = \\d+, limit = 100, scoreMode = COMPLETE, sorts = \\[\\{.+}]]" + ); + } + + @Override + protected void testSimple(DriverContext ctx, int size, int limit) { + LuceneTopNSourceOperator.Factory factory = simple(DataPartitioning.SHARD, size, limit); + Operator.OperatorFactory readS = ValuesSourceReaderOperatorTests.factory(reader, S_FIELD, ElementType.LONG); + + List<Page> results = new ArrayList<>(); + OperatorTestCase.runDriver( + new Driver(ctx, factory.get(ctx), List.of(readS.get(ctx)), new TestResultPageSinkOperator(results::add), () -> {}) + ); + OperatorTestCase.assertDriverContext(ctx); + + long expectedS = 0; + int maxPageSize = factory.maxPageSize(); + for (Page page : results) { + if (limit - expectedS < maxPageSize) { + assertThat(page.getPositionCount(), equalTo((int) (limit - expectedS))); + } else { + assertThat(page.getPositionCount(), equalTo(maxPageSize)); + } + DoubleBlock sBlock = page.getBlock(1); + for (int p = 0; p < page.getPositionCount(); p++) { + assertThat(sBlock.getDouble(sBlock.getFirstValueIndex(p)), equalTo(1.0d)); + expectedS++; + } + } + int pages = (int) Math.ceil((float) Math.min(size, limit) / maxPageSize); + assertThat(results, hasSize(pages)); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorTests.java index 938c4ce5c9f7d..d9a0b70b7931e 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorTests.java @@ -20,6 +20,8 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.compute.data.DocBlock; +import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; @@ -56,7 +58,7 @@ public class LuceneTopNSourceOperatorTests extends AnyOperatorTestCase { private IndexReader reader; @After - public void closeIndex() throws IOException { + private void closeIndex() throws IOException {
IOUtils.close(reader, directory); } @@ -105,19 +107,25 @@ public Optional<SortAndFormats> buildSort(List<SortBuilder<?>> sorts) { taskConcurrency, maxPageSize, limit, - sorts + sorts, + scoring ); } @Override protected Matcher<String> expectedToStringOfSimple() { - return matchesRegex("LuceneTopNSourceOperator\\[maxPageSize = \\d+, limit = 100, sorts = \\[\\{.+}]]"); + var s = scoring ? "COMPLETE" : "TOP_DOCS"; + return matchesRegex("LuceneTopNSourceOperator\\[maxPageSize = \\d+, limit = 100, scoreMode = " + s + ", sorts = \\[\\{.+}]]"); } @Override protected Matcher<String> expectedDescriptionOfSimple() { + var s = scoring ? "COMPLETE" : "TOP_DOCS"; return matchesRegex( - "LuceneTopNSourceOperator\\[dataPartitioning = (DOC|SHARD|SEGMENT), maxPageSize = \\d+, limit = 100, sorts = \\[\\{.+}]]" + "LuceneTopNSourceOperator" + + "\\[dataPartitioning = (DOC|SHARD|SEGMENT), maxPageSize = \\d+, limit = 100, scoreMode = " + + s + + ", sorts = \\[\\{.+}]]" ); } @@ -137,12 +145,24 @@ public void testShardDataPartitioningWithCranky() { } } - private void testShardDataPartitioning(DriverContext context) { + void testShardDataPartitioning(DriverContext context) { int size = between(1_000, 20_000); int limit = between(10, size); testSimple(context, size, limit); } + public void testWithCranky() { + try { + int size = between(1_000, 20_000); + int limit = between(10, size); + testSimple(crankyDriverContext(), size, limit); + logger.info("cranky didn't break"); + } catch (CircuitBreakingException e) { + logger.info("broken", e); + assertThat(e.getMessage(), equalTo(CrankyCircuitBreakerService.ERROR_MESSAGE)); + } + } + public void testEmpty() { testEmpty(driverContext()); } @@ -157,11 +177,11 @@ public void testEmptyWithCranky() { } } - private void testEmpty(DriverContext context) { + void testEmpty(DriverContext context) { testSimple(context, 0, between(10, 10_000)); } - private void testSimple(DriverContext ctx, int size, int limit) { + protected void testSimple(DriverContext ctx, int size, int limit) { LuceneTopNSourceOperator.Factory factory = simple(DataPartitioning.SHARD, size, limit); Operator.OperatorFactory readS = ValuesSourceReaderOperatorTests.factory(reader, S_FIELD, ElementType.LONG); @@ -178,7 +198,7 @@ private void testSimple(DriverContext ctx, int size, int limit) { } else { assertThat(page.getPositionCount(), equalTo(factory.maxPageSize())); } - LongBlock sBlock = page.getBlock(1); + LongBlock sBlock = page.getBlock(initialBlockIndex(page)); for (int p = 0; p < page.getPositionCount(); p++) { assertThat(sBlock.getLong(sBlock.getFirstValueIndex(p)), equalTo(expectedS++)); } @@ -186,4 +206,18 @@ private void testSimple(DriverContext ctx, int size, int limit) { int pages = (int) Math.ceil((float) Math.min(size, limit) / factory.maxPageSize()); assertThat(results, hasSize(pages)); } + + // Scores are not interesting to this test, but enabled conditionally and effectively ignored just for coverage.
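+ // When scoring is enabled, pages carry an extra DoubleBlock of scores at block index 1, between the DocBlock and the extracted field blocks; initialBlockIndex(Page) below skips over it.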
+ private final boolean scoring = randomBoolean(); + + // Returns the initial block index, ignoring the score block if scoring is enabled + private int initialBlockIndex(Page page) { + assert page.getBlock(0) instanceof DocBlock : "expected doc block at index 0"; + if (scoring) { + assert page.getBlock(1) instanceof DoubleBlock : "expected double block at index 1"; + return 2; + } else { + return 1; + } + } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValueSourceReaderTypeConversionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValueSourceReaderTypeConversionTests.java index f6d81af7c14e5..f31573f121a71 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValueSourceReaderTypeConversionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValueSourceReaderTypeConversionTests.java @@ -265,7 +265,8 @@ private SourceOperator simpleInput(DriverContext context, int size, int commitEv DataPartitioning.SHARD, 1,// randomIntBetween(1, 10), pageSize, - LuceneOperator.NO_LIMIT + LuceneOperator.NO_LIMIT, + false // no scoring ); return luceneFactory.get(context); } @@ -1292,7 +1293,8 @@ public void testWithNulls() throws IOException { randomFrom(DataPartitioning.values()), randomIntBetween(1, 10), randomPageSize(), - LuceneOperator.NO_LIMIT + LuceneOperator.NO_LIMIT, + false // no scoring ); var vsShardContext = new ValuesSourceReaderOperator.ShardContext(reader(indexKey), () -> SourceLoader.FROM_STORED_SOURCE); try ( @@ -1450,7 +1452,8 @@ public void testManyShards() throws IOException { DataPartitioning.SHARD, randomIntBetween(1, 10), 1000, - LuceneOperator.NO_LIMIT + LuceneOperator.NO_LIMIT, + false // no scoring ); // TODO add index2 MappedFieldType ft = mapperService(indexKey).fieldType("key"); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java index c8dd6f87be5fc..95b313b0b5412 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java @@ -170,7 +170,8 @@ private SourceOperator simpleInput(DriverContext context, int size, int commitEv DataPartitioning.SHARD, randomIntBetween(1, 10), pageSize, - LuceneOperator.NO_LIMIT + LuceneOperator.NO_LIMIT, + false // no scoring ); return luceneFactory.get(context); } @@ -1301,7 +1302,8 @@ public void testWithNulls() throws IOException { randomFrom(DataPartitioning.values()), randomIntBetween(1, 10), randomPageSize(), - LuceneOperator.NO_LIMIT + LuceneOperator.NO_LIMIT, + false // no scoring ); try ( Driver driver = new Driver( @@ -1524,7 +1526,8 @@ public void testManyShards() throws IOException { DataPartitioning.SHARD, randomIntBetween(1, 10), 1000, - LuceneOperator.NO_LIMIT + LuceneOperator.NO_LIMIT, + false // no scoring ); MappedFieldType ft = mapperService.fieldType("key"); var readerFactory = new ValuesSourceReaderOperator.Factory( diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java index c0396fdc469aa..542bf5bc384a5 100644 --- 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java @@ -209,8 +209,19 @@ List<Driver> createDriversForInput(List<Page> input, List<Page> results, boolean randomIntBetween(2, 10), threadPool.relativeTimeInMillisSupplier() ); - ExchangeSourceHandler sourceExchanger = new ExchangeSourceHandler(randomIntBetween(1, 4), threadPool.executor(ESQL_TEST_EXECUTOR)); - sourceExchanger.addRemoteSink(sinkExchanger::fetchPageAsync, 1); + ExchangeSourceHandler sourceExchanger = new ExchangeSourceHandler( + randomIntBetween(1, 4), + threadPool.executor(ESQL_TEST_EXECUTOR), + ActionListener.noop() + ); + sourceExchanger.addRemoteSink( + sinkExchanger::fetchPageAsync, + randomBoolean(), + 1, + ActionListener.noop().delegateResponse((l, e) -> { + throw new AssertionError("unexpected failure", e); + }) + ); Iterator<Operator> intermediateOperatorItr; int itrSize = (splitInput.size() * 3) + 3; // 3 inter ops per initial source drivers, and 3 per final diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashAggregationOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashAggregationOperatorTests.java index f2fa94c1feb08..953c7d1c313f1 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashAggregationOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashAggregationOperatorTests.java @@ -54,11 +54,13 @@ protected Operator.OperatorFactory simpleWithMode(AggregatorMode mode) { return new HashAggregationOperator.HashAggregationOperatorFactory( List.of(new BlockHash.GroupSpec(0, ElementType.LONG)), + mode, List.of( new SumLongAggregatorFunctionSupplier(sumChannels).groupingAggregatorFactory(mode), new MaxLongAggregatorFunctionSupplier(maxChannels).groupingAggregatorFactory(mode) ), - randomPageSize() + randomPageSize(), + null ); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/RowOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/RowOperatorTests.java deleted file mode 100644 index cd8a49939fbb5..0000000000000 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/RowOperatorTests.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0.
- */ - -package org.elasticsearch.compute.operator; - -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.util.MockBigArrays; -import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BooleanBlock; -import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.DoubleBlock; -import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.LongBlock; -import org.elasticsearch.compute.data.TestBlockFactory; -import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.test.ESTestCase; - -import java.util.Arrays; -import java.util.List; - -import static org.hamcrest.Matchers.equalTo; - -public class RowOperatorTests extends ESTestCase { - final DriverContext driverContext = new DriverContext( - new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService()).withCircuitBreaking(), - TestBlockFactory.getNonBreakingInstance() - ); - - public void testBoolean() { - RowOperator.RowOperatorFactory factory = new RowOperator.RowOperatorFactory(List.of(false)); - assertThat(factory.describe(), equalTo("RowOperator[objects = false]")); - assertThat(factory.get(driverContext).toString(), equalTo("RowOperator[objects=[false]]")); - BooleanBlock block = factory.get(driverContext).getOutput().getBlock(0); - assertThat(block.getBoolean(0), equalTo(false)); - } - - public void testInt() { - RowOperator.RowOperatorFactory factory = new RowOperator.RowOperatorFactory(List.of(213)); - assertThat(factory.describe(), equalTo("RowOperator[objects = 213]")); - assertThat(factory.get(driverContext).toString(), equalTo("RowOperator[objects=[213]]")); - IntBlock block = factory.get(driverContext).getOutput().getBlock(0); - assertThat(block.getInt(0), equalTo(213)); - } - - public void testLong() { - RowOperator.RowOperatorFactory factory = new RowOperator.RowOperatorFactory(List.of(21321343214L)); - assertThat(factory.describe(), equalTo("RowOperator[objects = 21321343214]")); - assertThat(factory.get(driverContext).toString(), equalTo("RowOperator[objects=[21321343214]]")); - LongBlock block = factory.get(driverContext).getOutput().getBlock(0); - assertThat(block.getLong(0), equalTo(21321343214L)); - } - - public void testDouble() { - RowOperator.RowOperatorFactory factory = new RowOperator.RowOperatorFactory(List.of(2.0)); - assertThat(factory.describe(), equalTo("RowOperator[objects = 2.0]")); - assertThat(factory.get(driverContext).toString(), equalTo("RowOperator[objects=[2.0]]")); - DoubleBlock block = factory.get(driverContext).getOutput().getBlock(0); - assertThat(block.getDouble(0), equalTo(2.0)); - } - - public void testString() { - RowOperator.RowOperatorFactory factory = new RowOperator.RowOperatorFactory(List.of(new BytesRef("cat"))); - assertThat(factory.describe(), equalTo("RowOperator[objects = [63 61 74]]")); - assertThat(factory.get(driverContext).toString(), equalTo("RowOperator[objects=[[63 61 74]]]")); - BytesRefBlock block = factory.get(driverContext).getOutput().getBlock(0); - assertThat(block.getBytesRef(0, new BytesRef()), equalTo(new BytesRef("cat"))); - } - - public void testNull() { - RowOperator.RowOperatorFactory factory = new RowOperator.RowOperatorFactory(Arrays.asList(new Object[] { null })); - assertThat(factory.describe(), equalTo("RowOperator[objects = null]")); - assertThat(factory.get(driverContext).toString(), equalTo("RowOperator[objects=[null]]")); - Block block 
= factory.get(driverContext).getOutput().getBlock(0); - assertTrue(block.isNull(0)); - } -} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java index 9e07f9c8f5faf..4178f02898d79 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -22,8 +23,8 @@ import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockWritables; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.MockBlockFactory; import org.elasticsearch.compute.data.Page; @@ -56,6 +57,7 @@ import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; import java.util.function.Supplier; @@ -63,6 +65,7 @@ import java.util.stream.IntStream; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; public class ExchangeServiceTests extends ESTestCase { @@ -94,11 +97,10 @@ public void testBasic() throws Exception { ExchangeSinkHandler sinkExchanger = new ExchangeSinkHandler(blockFactory, 2, threadPool.relativeTimeInMillisSupplier()); ExchangeSink sink1 = sinkExchanger.createExchangeSink(); ExchangeSink sink2 = sinkExchanger.createExchangeSink(); - ExchangeSourceHandler sourceExchanger = new ExchangeSourceHandler(3, threadPool.executor(ESQL_TEST_EXECUTOR)); PlainActionFuture<Void> sourceCompletion = new PlainActionFuture<>(); - sourceExchanger.addCompletionListener(sourceCompletion); + ExchangeSourceHandler sourceExchanger = new ExchangeSourceHandler(3, threadPool.executor(ESQL_TEST_EXECUTOR), sourceCompletion); ExchangeSource source = sourceExchanger.createExchangeSource(); - sourceExchanger.addRemoteSink(sinkExchanger::fetchPageAsync, 1); + sourceExchanger.addRemoteSink(sinkExchanger::fetchPageAsync, randomBoolean(), 1, ActionListener.noop()); SubscribableListener<Void> waitForReading = source.waitForReading().listener(); assertFalse(waitForReading.isDone()); assertNull(source.pollPage()); @@ -263,7 +265,7 @@ public void close() { } } - void runConcurrentTest( + Set<Integer> runConcurrentTest( int maxInputSeqNo, int maxOutputSeqNo, Supplier<ExchangeSource> exchangeSource, @@ -318,16 +320,17 @@ protected void start(Driver driver, ActionListener<Void> listener) { } }.runToCompletion(drivers, future); future.actionGet(TimeValue.timeValueMinutes(1)); - var expectedSeqNos = IntStream.range(0, Math.min(maxInputSeqNo, maxOutputSeqNo)).boxed().collect(Collectors.toSet()); -
assertThat(seqNoCollector.receivedSeqNos, hasSize(expectedSeqNos.size())); - assertThat(seqNoCollector.receivedSeqNos, equalTo(expectedSeqNos)); + return seqNoCollector.receivedSeqNos; } public void testConcurrentWithHandlers() { BlockFactory blockFactory = blockFactory(); PlainActionFuture sourceCompletionFuture = new PlainActionFuture<>(); - var sourceExchanger = new ExchangeSourceHandler(randomExchangeBuffer(), threadPool.executor(ESQL_TEST_EXECUTOR)); - sourceExchanger.addCompletionListener(sourceCompletionFuture); + var sourceExchanger = new ExchangeSourceHandler( + randomExchangeBuffer(), + threadPool.executor(ESQL_TEST_EXECUTOR), + sourceCompletionFuture + ); List sinkHandlers = new ArrayList<>(); Supplier exchangeSink = () -> { final ExchangeSinkHandler sinkHandler; @@ -335,17 +338,89 @@ public void testConcurrentWithHandlers() { sinkHandler = randomFrom(sinkHandlers); } else { sinkHandler = new ExchangeSinkHandler(blockFactory, randomExchangeBuffer(), threadPool.relativeTimeInMillisSupplier()); - sourceExchanger.addRemoteSink(sinkHandler::fetchPageAsync, randomIntBetween(1, 3)); + sourceExchanger.addRemoteSink(sinkHandler::fetchPageAsync, randomBoolean(), randomIntBetween(1, 3), ActionListener.noop()); sinkHandlers.add(sinkHandler); } return sinkHandler.createExchangeSink(); }; final int maxInputSeqNo = rarely() ? -1 : randomIntBetween(0, 50_000); final int maxOutputSeqNo = rarely() ? -1 : randomIntBetween(0, 50_000); - runConcurrentTest(maxInputSeqNo, maxOutputSeqNo, sourceExchanger::createExchangeSource, exchangeSink); + Set actualSeqNos = runConcurrentTest(maxInputSeqNo, maxOutputSeqNo, sourceExchanger::createExchangeSource, exchangeSink); + var expectedSeqNos = IntStream.range(0, Math.min(maxInputSeqNo, maxOutputSeqNo)).boxed().collect(Collectors.toSet()); + assertThat(actualSeqNos, hasSize(expectedSeqNos.size())); + assertThat(actualSeqNos, equalTo(expectedSeqNos)); sourceCompletionFuture.actionGet(10, TimeUnit.SECONDS); } + public void testExchangeSourceContinueOnFailure() { + BlockFactory blockFactory = blockFactory(); + PlainActionFuture sourceCompletionFuture = new PlainActionFuture<>(); + var exchangeSourceHandler = new ExchangeSourceHandler( + randomExchangeBuffer(), + threadPool.executor(ESQL_TEST_EXECUTOR), + sourceCompletionFuture + ); + final int maxInputSeqNo = rarely() ? -1 : randomIntBetween(0, 50_000); + final int maxOutputSeqNo = rarely() ? -1 : randomIntBetween(0, 50_000); + Set expectedSeqNos = ConcurrentCollections.newConcurrentSet(); + AtomicInteger failedRequests = new AtomicInteger(); + AtomicInteger totalSinks = new AtomicInteger(); + AtomicInteger failedSinks = new AtomicInteger(); + AtomicInteger completedSinks = new AtomicInteger(); + Supplier exchangeSink = () -> { + var sinkHandler = new ExchangeSinkHandler(blockFactory, randomExchangeBuffer(), threadPool.relativeTimeInMillisSupplier()); + int failAfter = randomBoolean() ? 
Integer.MAX_VALUE : randomIntBetween(0, 100); + AtomicInteger fetched = new AtomicInteger(); + int instance = randomIntBetween(1, 3); + totalSinks.incrementAndGet(); + AtomicBoolean sinkFailed = new AtomicBoolean(); + exchangeSourceHandler.addRemoteSink((allSourcesFinished, listener) -> { + if (fetched.incrementAndGet() > failAfter) { + sinkHandler.fetchPageAsync(true, listener.delegateFailure((l, r) -> { + failedRequests.incrementAndGet(); + sinkFailed.set(true); + listener.onFailure(new CircuitBreakingException("simulated", CircuitBreaker.Durability.PERMANENT)); + })); + } else { + sinkHandler.fetchPageAsync(allSourcesFinished, listener.delegateFailure((l, r) -> { + Page page = r.takePage(); + if (page != null) { + IntBlock block = page.getBlock(0); + for (int i = 0; i < block.getPositionCount(); i++) { + int v = block.getInt(i); + if (v < maxOutputSeqNo) { + expectedSeqNos.add(v); + } + } + } + l.onResponse(new ExchangeResponse(blockFactory, page, r.finished())); + })); + } + }, false, instance, ActionListener.wrap(r -> { + assertFalse(sinkFailed.get()); + completedSinks.incrementAndGet(); + }, e -> { + assertTrue(sinkFailed.get()); + failedSinks.incrementAndGet(); + })); + return sinkHandler.createExchangeSink(); + }; + Set actualSeqNos = runConcurrentTest( + maxInputSeqNo, + maxOutputSeqNo, + exchangeSourceHandler::createExchangeSource, + exchangeSink + ); + assertThat(actualSeqNos, equalTo(expectedSeqNos)); + assertThat(completedSinks.get() + failedSinks.get(), equalTo(totalSinks.get())); + sourceCompletionFuture.actionGet(); + if (failedRequests.get() > 0) { + assertThat(failedSinks.get(), greaterThan(0)); + } else { + assertThat(failedSinks.get(), equalTo(0)); + } + } + public void testEarlyTerminate() { BlockFactory blockFactory = blockFactory(); IntBlock block1 = blockFactory.newConstantIntBlockWith(1, 2); @@ -374,19 +449,44 @@ public void testConcurrentWithTransportActions() { ExchangeService exchange1 = new ExchangeService(Settings.EMPTY, threadPool, ESQL_TEST_EXECUTOR, blockFactory()); exchange1.registerTransportHandler(node1); AbstractSimpleTransportTestCase.connectToNode(node0, node1.getLocalNode()); + Set finishingRequests = ConcurrentCollections.newConcurrentSet(); + node1.addRequestHandlingBehavior(ExchangeService.EXCHANGE_ACTION_NAME, (handler, request, channel, task) -> { + final ExchangeRequest exchangeRequest = (ExchangeRequest) request; + if (exchangeRequest.sourcesFinished()) { + String exchangeId = exchangeRequest.exchangeId(); + assertTrue("tried to finish [" + exchangeId + "] twice", finishingRequests.add(exchangeId)); + } + handler.messageReceived(request, channel, task); + }); try (exchange0; exchange1; node0; node1) { String exchangeId = "exchange"; Task task = new Task(1, "", "", "", null, Collections.emptyMap()); - var sourceHandler = new ExchangeSourceHandler(randomExchangeBuffer(), threadPool.executor(ESQL_TEST_EXECUTOR)); PlainActionFuture sourceCompletionFuture = new PlainActionFuture<>(); - sourceHandler.addCompletionListener(sourceCompletionFuture); + var sourceHandler = new ExchangeSourceHandler( + randomExchangeBuffer(), + threadPool.executor(ESQL_TEST_EXECUTOR), + sourceCompletionFuture + ); ExchangeSinkHandler sinkHandler = exchange1.createSinkHandler(exchangeId, randomExchangeBuffer()); Transport.Connection connection = node0.getConnection(node1.getLocalNode()); - sourceHandler.addRemoteSink(exchange0.newRemoteSink(task, exchangeId, node0, connection), randomIntBetween(1, 5)); + sourceHandler.addRemoteSink( + exchange0.newRemoteSink(task, 
exchangeId, node0, connection), + randomBoolean(), + randomIntBetween(1, 5), + ActionListener.noop() + ); final int maxInputSeqNo = rarely() ? -1 : randomIntBetween(0, 50_000); final int maxOutputSeqNo = rarely() ? -1 : randomIntBetween(0, 50_000); - runConcurrentTest(maxInputSeqNo, maxOutputSeqNo, sourceHandler::createExchangeSource, sinkHandler::createExchangeSink); + Set actualSeqNos = runConcurrentTest( + maxInputSeqNo, + maxOutputSeqNo, + sourceHandler::createExchangeSource, + sinkHandler::createExchangeSink + ); + var expectedSeqNos = IntStream.range(0, Math.min(maxInputSeqNo, maxOutputSeqNo)).boxed().collect(Collectors.toSet()); + assertThat(actualSeqNos, hasSize(expectedSeqNos.size())); + assertThat(actualSeqNos, equalTo(expectedSeqNos)); sourceCompletionFuture.actionGet(10, TimeUnit.SECONDS); } } @@ -437,12 +537,20 @@ public void sendResponse(TransportResponse transportResponse) { try (exchange0; exchange1; node0; node1) { String exchangeId = "exchange"; Task task = new Task(1, "", "", "", null, Collections.emptyMap()); - var sourceHandler = new ExchangeSourceHandler(randomIntBetween(1, 128), threadPool.executor(ESQL_TEST_EXECUTOR)); PlainActionFuture sourceCompletionFuture = new PlainActionFuture<>(); - sourceHandler.addCompletionListener(sourceCompletionFuture); + var sourceHandler = new ExchangeSourceHandler( + randomIntBetween(1, 128), + threadPool.executor(ESQL_TEST_EXECUTOR), + sourceCompletionFuture + ); ExchangeSinkHandler sinkHandler = exchange1.createSinkHandler(exchangeId, randomIntBetween(1, 128)); Transport.Connection connection = node0.getConnection(node1.getLocalNode()); - sourceHandler.addRemoteSink(exchange0.newRemoteSink(task, exchangeId, node0, connection), randomIntBetween(1, 5)); + sourceHandler.addRemoteSink( + exchange0.newRemoteSink(task, exchangeId, node0, connection), + true, + randomIntBetween(1, 5), + ActionListener.noop() + ); Exception err = expectThrows( Exception.class, () -> runConcurrentTest(maxSeqNo, maxSeqNo, sourceHandler::createExchangeSource, sinkHandler::createExchangeSink) @@ -451,13 +559,13 @@ public void sendResponse(TransportResponse transportResponse) { assertNotNull(cause); assertThat(cause.getMessage(), equalTo("page is too large")); sinkHandler.onFailure(new RuntimeException(cause)); - sourceCompletionFuture.actionGet(10, TimeUnit.SECONDS); + expectThrows(Exception.class, () -> sourceCompletionFuture.actionGet(10, TimeUnit.SECONDS)); } } private MockTransportService newTransportService() { List namedWriteables = new ArrayList<>(ClusterModule.getNamedWriteables()); - namedWriteables.addAll(Block.getNamedWriteables()); + namedWriteables.addAll(BlockWritables.getNamedWriteables()); NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); MockTransportService service = MockTransportService.createNewService( Settings.EMPTY, diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/lookup/EnrichQuerySourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/lookup/EnrichQuerySourceOperatorTests.java index 6daace76dd8b8..2af52b6bab5a8 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/lookup/EnrichQuerySourceOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/lookup/EnrichQuerySourceOperatorTests.java @@ -32,6 +32,8 @@ import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import 
org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.Warnings; import org.elasticsearch.core.IOUtils; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -120,7 +122,8 @@ public void testQueries() throws Exception { // 3 -> [] -> [] // 4 -> [a1] -> [3] // 5 -> [] -> [] - EnrichQuerySourceOperator queryOperator = new EnrichQuerySourceOperator(blockFactory, 128, queryList, reader); + var warnings = Warnings.createWarnings(DriverContext.WarningsMode.IGNORE, 0, 0, "test enrich"); + EnrichQuerySourceOperator queryOperator = new EnrichQuerySourceOperator(blockFactory, 128, queryList, reader, warnings); Page p0 = queryOperator.getOutput(); assertNotNull(p0); assertThat(p0.getPositionCount(), equalTo(6)); @@ -187,7 +190,8 @@ public void testRandomMatchQueries() throws Exception { MappedFieldType uidField = new KeywordFieldMapper.KeywordFieldType("uid"); var queryList = QueryList.rawTermQueryList(uidField, mock(SearchExecutionContext.class), inputTerms); int maxPageSize = between(1, 256); - EnrichQuerySourceOperator queryOperator = new EnrichQuerySourceOperator(blockFactory, maxPageSize, queryList, reader); + var warnings = Warnings.createWarnings(DriverContext.WarningsMode.IGNORE, 0, 0, "test enrich"); + EnrichQuerySourceOperator queryOperator = new EnrichQuerySourceOperator(blockFactory, maxPageSize, queryList, reader, warnings); Map> actualPositions = new HashMap<>(); while (queryOperator.isFinished() == false) { Page page = queryOperator.getOutput(); diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle b/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle index fb47255e8d52e..eac5d5764d4b2 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle @@ -1,8 +1,13 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.util.GradleUtils -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-java-rest-test' @@ -31,7 +36,7 @@ def supportedVersion = bwcVersion -> { return bwcVersion.onOrAfter(Version.fromString("8.11.0")) && bwcVersion != VersionProperties.elasticsearchVersion } -BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> def javaRestTest = tasks.register("v${bwcVersion}#javaRestTest", StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle b/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle index aa19371685ce1..7f3859e2229ef 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle +++ b/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle @@ -6,7 +6,6 @@ */ import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-java-rest-test' @@ -15,6 +14,7 @@ apply plugin: 'elasticsearch.bwc-test' dependencies { javaRestTestImplementation project(xpackModule('esql:qa:testFixtures')) javaRestTestImplementation project(xpackModule('esql:qa:server')) + javaRestTestImplementation project(xpackModule('esql')) } def supportedVersion = bwcVersion -> { @@ -22,7 +22,7 @@ def supportedVersion = bwcVersion -> { return bwcVersion.onOrAfter(Version.fromString("8.13.0")); } -BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java index 62391c8ca001a..af5eadc7358a2 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java @@ -45,6 +45,11 @@ import static org.elasticsearch.xpack.esql.CsvTestUtils.isEnabled; import static org.elasticsearch.xpack.esql.CsvTestsDataLoader.ENRICH_SOURCE_INDICES; import static org.elasticsearch.xpack.esql.EsqlTestUtils.classpathResources; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS_V2; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V3; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_PLANNING_V1; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.METADATA_FIELDS_REMOTE_TEST; import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.SYNC; import static org.mockito.ArgumentMatchers.any; 
import static org.mockito.Mockito.doAnswer; @@ -101,16 +106,26 @@ public MultiClusterSpecIT( @Override protected void shouldSkipTest(String testName) throws IOException { + boolean remoteMetadata = testCase.requiredCapabilities.contains(METADATA_FIELDS_REMOTE_TEST.capabilityName()); + if (remoteMetadata) { + // remove the capability from the test to enable it + testCase.requiredCapabilities = testCase.requiredCapabilities.stream() + .filter(c -> c.equals("metadata_fields_remote_test") == false) + .toList(); + } super.shouldSkipTest(testName); checkCapabilities(remoteClusterClient(), remoteFeaturesService(), testName, testCase); - assumeFalse("can't test with _index metadata", hasIndexMetadata(testCase.query)); + // Do not run tests including "METADATA _index" unless marked with metadata_fields_remote_test, + // because they may produce inconsistent results with multiple clusters. + assumeFalse("can't test with _index metadata", (remoteMetadata == false) && hasIndexMetadata(testCase.query)); assumeTrue( "Test " + testName + " is skipped on " + Clusters.oldVersion(), isEnabled(testName, instructions, Clusters.oldVersion()) ); - assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains("inlinestats")); - assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains("inlinestats_v2")); - assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains("join_planning_v1")); + assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS.capabilityName())); + assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS_V2.capabilityName())); + assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_PLANNING_V1.capabilityName())); + assumeFalse("LOOKUP JOIN not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_LOOKUP_V3.capabilityName())); } private TestFeatureService remoteFeaturesService() throws IOException { @@ -151,6 +166,9 @@ protected RestClient buildClient(Settings settings, HttpHost[] localHosts) throw return twoClients(localClient, remoteClient); } + // These indices are used in metadata tests so we want them on remote only for consistency + public static final List METADATA_INDICES = List.of("employees", "apps", "ul_logs"); + /** * Creates a new mock client that dispatches every request to both the local and remote clusters, excluding _bulk and _query requests. * - '_bulk' requests are randomly sent to either the local or remote cluster to populate data. Some spec tests, such as AVG, @@ -166,6 +184,8 @@ static RestClient twoClients(RestClient localClient, RestClient remoteClient) th String endpoint = request.getEndpoint(); if (endpoint.startsWith("/_query")) { return localClient.performRequest(request); + } else if (endpoint.endsWith("/_bulk") && METADATA_INDICES.stream().anyMatch(i -> endpoint.equals("/" + i + "/_bulk"))) { + return remoteClient.performRequest(request); } else if (endpoint.endsWith("/_bulk") && ENRICH_SOURCE_INDICES.stream().noneMatch(i -> endpoint.equals("/" + i + "/_bulk"))) { return bulkClient.performRequest(request); } else { @@ -203,6 +223,9 @@ static Request[] cloneRequests(Request orig, int numClones) throws IOException { return clones; } + /** + * Convert FROM employees ... 
=> FROM *:employees,employees + */ static CsvSpecReader.CsvTestCase convertToRemoteIndices(CsvSpecReader.CsvTestCase testCase) { String query = testCase.query; String[] commands = query.split("\\|"); diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java index 6ebf05755ef5e..2484a428c4b03 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java @@ -51,7 +51,6 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.LongStream; -import java.util.stream.Stream; import static org.apache.lucene.geo.GeoEncodingUtils.decodeLatitude; import static org.apache.lucene.geo.GeoEncodingUtils.decodeLongitude; @@ -173,7 +172,8 @@ public final void test() throws Throwable { } protected void shouldSkipTest(String testName) throws IOException { - if (testCase.requiredCapabilities.contains("semantic_text_type")) { + if (testCase.requiredCapabilities.contains("semantic_text_type") + || testCase.requiredCapabilities.contains("semantic_text_aggregations")) { assumeTrue("Inference test service needs to be supported for semantic_text", supportsInferenceTestService()); } checkCapabilities(adminClient(), testFeatureService, testName, testCase); @@ -207,10 +207,7 @@ protected static void checkCapabilities(RestClient client, TestFeatureService te } } - var features = Stream.concat( - new EsqlFeatures().getFeatures().stream(), - new EsqlFeatures().getHistoricalFeatures().keySet().stream() - ).map(NodeFeature::id).collect(Collectors.toSet()); + var features = new EsqlFeatures().getFeatures().stream().map(NodeFeature::id).collect(Collectors.toSet()); for (String feature : testCase.requiredCapabilities) { var esqlFeature = "esql." 
+ feature; diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java index d124fdb5755c3..813354db697e1 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java @@ -14,6 +14,7 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.index.mapper.BlockLoader; @@ -27,6 +28,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.esql.action.EsqlCapabilities; import org.hamcrest.Matcher; import org.junit.Before; @@ -1106,6 +1108,323 @@ public void testTypeConflictInObject() throws IOException { ); } + /** + * Test for https://github.com/elastic/elasticsearch/issues/117054 fix + */ + public void testOneNestedSubField_AndSameNameSupportedField() throws IOException { + assumeIndexResolverNestedFieldsNameClashFixed(); + ESRestTestCase.createIndex("test", Settings.EMPTY, """ + "properties": { + "Responses": { + "properties": { + "process": { + "type": "nested", + "properties": { + "pid": { + "type": "long" + } + } + } + } + }, + "process": { + "properties": { + "parent": { + "properties": { + "command_line": { + "type": "wildcard", + "fields": { + "text": { + "type": "text" + } + } + } + } + } + } + } + } + """); + + Map result = runEsql("FROM test"); + assertMap( + result, + matchesMapWithOptionalTook(result.get("took")).entry( + "columns", + List.of(columnInfo("process.parent.command_line", "keyword"), columnInfo("process.parent.command_line.text", "text")) + ).entry("values", Collections.EMPTY_LIST) + ); + + index("test", """ + {"Responses.process.pid": 123,"process.parent.command_line":"run.bat"}"""); + + result = runEsql("FROM test"); + assertMap( + result, + matchesMapWithOptionalTook(result.get("took")).entry( + "columns", + List.of(columnInfo("process.parent.command_line", "keyword"), columnInfo("process.parent.command_line.text", "text")) + ).entry("values", List.of(matchesList().item("run.bat").item("run.bat"))) + ); + + result = runEsql(""" + FROM test | where process.parent.command_line == "run.bat" + """); + assertMap( + result, + matchesMapWithOptionalTook(result.get("took")).entry( + "columns", + List.of(columnInfo("process.parent.command_line", "keyword"), columnInfo("process.parent.command_line.text", "text")) + ).entry("values", List.of(matchesList().item("run.bat").item("run.bat"))) + ); + + ResponseException e = expectThrows(ResponseException.class, () -> runEsql("FROM test | SORT Responses.process.pid")); + String err = EntityUtils.toString(e.getResponse().getEntity()); + assertThat(err, containsString("line 1:18: Unknown column [Responses.process.pid]")); + + e = expectThrows(ResponseException.class, () -> runEsql(""" + FROM test + | SORT Responses.process.pid + | WHERE Responses.process IS NULL + """)); + err = EntityUtils.toString(e.getResponse().getEntity()); + assertThat(err, containsString("line 2:8: Unknown column 
[Responses.process.pid]")); + } + + public void testOneNestedSubField_AndSameNameSupportedField_TwoIndices() throws IOException { + assumeIndexResolverNestedFieldsNameClashFixed(); + ESRestTestCase.createIndex("test1", Settings.EMPTY, """ + "properties": { + "Responses": { + "properties": { + "process": { + "type": "nested", + "properties": { + "pid": { + "type": "long" + } + } + } + } + } + } + """); + ESRestTestCase.createIndex("test2", Settings.EMPTY, """ + "properties": { + "process": { + "properties": { + "parent": { + "properties": { + "command_line": { + "type": "wildcard", + "fields": { + "text": { + "type": "text" + } + } + } + } + } + } + } + } + """); + index("test1", """ + {"Responses.process.pid": 123}"""); + index("test2", """ + {"process.parent.command_line":"run.bat"}"""); + + Map result = runEsql("FROM test* | SORT process.parent.command_line ASC NULLS FIRST"); + assertMap( + result, + matchesMapWithOptionalTook(result.get("took")).entry( + "columns", + List.of(columnInfo("process.parent.command_line", "keyword"), columnInfo("process.parent.command_line.text", "text")) + ).entry("values", List.of(matchesList().item(null).item(null), matchesList().item("run.bat").item("run.bat"))) + ); + + result = runEsql(""" + FROM test* | where process.parent.command_line == "run.bat" + """); + assertMap( + result, + matchesMapWithOptionalTook(result.get("took")).entry( + "columns", + List.of(columnInfo("process.parent.command_line", "keyword"), columnInfo("process.parent.command_line.text", "text")) + ).entry("values", List.of(matchesList().item("run.bat").item("run.bat"))) + ); + + ResponseException e = expectThrows(ResponseException.class, () -> runEsql("FROM test* | SORT Responses.process.pid")); + String err = EntityUtils.toString(e.getResponse().getEntity()); + assertThat(err, containsString("line 1:19: Unknown column [Responses.process.pid]")); + + e = expectThrows(ResponseException.class, () -> runEsql(""" + FROM test* + | SORT Responses.process.pid + | WHERE Responses.process IS NULL + """)); + err = EntityUtils.toString(e.getResponse().getEntity()); + assertThat(err, containsString("line 2:8: Unknown column [Responses.process.pid]")); + } + + public void testOneNestedField_AndSameNameSupportedField_TwoIndices() throws IOException { + assumeIndexResolverNestedFieldsNameClashFixed(); + ESRestTestCase.createIndex("test1", Settings.EMPTY, """ + "properties": { + "Responses": { + "properties": { + "process": { + "type": "nested", + "properties": { + "pid": { + "type": "long" + } + } + } + } + }, + "process": { + "properties": { + "parent": { + "properties": { + "command_line": { + "type": "wildcard", + "fields": { + "text": { + "type": "text" + } + } + } + } + } + } + } + } + """); + ESRestTestCase.createIndex("test2", Settings.EMPTY, """ + "properties": { + "Responses": { + "properties": { + "process": { + "type": "integer", + "fields": { + "pid": { + "type": "long" + } + } + } + } + }, + "process": { + "properties": { + "parent": { + "properties": { + "command_line": { + "type": "wildcard", + "fields": { + "text": { + "type": "text" + } + } + } + } + } + } + } + } + """); + index("test1", """ + {"Responses.process.pid": 111,"process.parent.command_line":"run1.bat"}"""); + index("test2", """ + {"Responses.process": 222,"process.parent.command_line":"run2.bat"}"""); + + Map result = runEsql("FROM test* | SORT process.parent.command_line"); + assertMap( + result, + matchesMapWithOptionalTook(result.get("took")).entry( + "columns", + List.of( + columnInfo("Responses.process", 
"integer"), + columnInfo("Responses.process.pid", "long"), + columnInfo("process.parent.command_line", "keyword"), + columnInfo("process.parent.command_line.text", "text") + ) + ) + .entry( + "values", + List.of( + matchesList().item(null).item(null).item("run1.bat").item("run1.bat"), + matchesList().item(222).item(222).item("run2.bat").item("run2.bat") + ) + ) + ); + + result = runEsql(""" + FROM test* | where Responses.process.pid == 111 + """); + assertMap( + result, + matchesMapWithOptionalTook(result.get("took")).entry( + "columns", + List.of( + columnInfo("Responses.process", "integer"), + columnInfo("Responses.process.pid", "long"), + columnInfo("process.parent.command_line", "keyword"), + columnInfo("process.parent.command_line.text", "text") + ) + ).entry("values", List.of()) + ); + + result = runEsql("FROM test* | SORT process.parent.command_line"); + assertMap( + result, + matchesMapWithOptionalTook(result.get("took")).entry( + "columns", + List.of( + columnInfo("Responses.process", "integer"), + columnInfo("Responses.process.pid", "long"), + columnInfo("process.parent.command_line", "keyword"), + columnInfo("process.parent.command_line.text", "text") + ) + ) + .entry( + "values", + List.of( + matchesList().item(null).item(null).item("run1.bat").item("run1.bat"), + matchesList().item(222).item(222).item("run2.bat").item("run2.bat") + ) + ) + ); + + result = runEsql(""" + FROM test* + | SORT process.parent.command_line + | WHERE Responses.process IS NULL + """); + assertMap( + result, + matchesMapWithOptionalTook(result.get("took")).entry( + "columns", + List.of( + columnInfo("Responses.process", "integer"), + columnInfo("Responses.process.pid", "long"), + columnInfo("process.parent.command_line", "keyword"), + columnInfo("process.parent.command_line.text", "text") + ) + ).entry("values", List.of(matchesList().item(null).item(null).item("run1.bat").item("run1.bat"))) + ); + } + + private void assumeIndexResolverNestedFieldsNameClashFixed() throws IOException { + // especially for BWC tests but also for regular tests + var capsName = EsqlCapabilities.Cap.FIX_NESTED_FIELDS_NAME_CLASH_IN_INDEXRESOLVER.name().toLowerCase(Locale.ROOT); + boolean requiredClusterCapability = clusterHasCapability("POST", "/_query", List.of(), List.of(capsName)).orElse(false); + assumeTrue( + "This test makes sense for versions that have the fix for https://github.com/elastic/elasticsearch/issues/117054", + requiredClusterCapability + ); + } + private CheckedConsumer empNoInObject(String empNoType) { return index -> { index.startObject("properties"); @@ -1456,16 +1775,12 @@ private static void index(String name, String... 
docs) throws IOException { } private static void createIndex(String name, CheckedConsumer mapping) throws IOException { - Request request = new Request("PUT", "/" + name); XContentBuilder index = JsonXContent.contentBuilder().prettyPrint().startObject(); - index.startObject("mappings"); mapping.accept(index); index.endObject(); - index.endObject(); String configStr = Strings.toString(index); logger.info("index: {} {}", name, configStr); - request.setJsonEntity(configStr); - client().performRequest(request); + ESRestTestCase.createIndex(name, Settings.EMPTY, configStr); } /** diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index 478c68db68aa7..9c987a02aca2d 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -56,9 +56,12 @@ public class CsvTestsDataLoader { private static final TestsDataset APPS = new TestsDataset("apps"); private static final TestsDataset APPS_SHORT = APPS.withIndex("apps_short").withTypeMapping(Map.of("id", "short")); private static final TestsDataset LANGUAGES = new TestsDataset("languages"); + private static final TestsDataset LANGUAGES_LOOKUP = LANGUAGES.withIndex("languages_lookup") + .withSetting("languages_lookup-settings.json"); private static final TestsDataset ALERTS = new TestsDataset("alerts"); private static final TestsDataset UL_LOGS = new TestsDataset("ul_logs"); private static final TestsDataset SAMPLE_DATA = new TestsDataset("sample_data"); + private static final TestsDataset MV_SAMPLE_DATA = new TestsDataset("mv_sample_data"); private static final TestsDataset SAMPLE_DATA_STR = SAMPLE_DATA.withIndex("sample_data_str") .withTypeMapping(Map.of("client_ip", "keyword")); private static final TestsDataset SAMPLE_DATA_TS_LONG = SAMPLE_DATA.withIndex("sample_data_ts_long") @@ -99,8 +102,10 @@ public class CsvTestsDataLoader { Map.entry(APPS.indexName, APPS), Map.entry(APPS_SHORT.indexName, APPS_SHORT), Map.entry(LANGUAGES.indexName, LANGUAGES), + Map.entry(LANGUAGES_LOOKUP.indexName, LANGUAGES_LOOKUP), Map.entry(UL_LOGS.indexName, UL_LOGS), Map.entry(SAMPLE_DATA.indexName, SAMPLE_DATA), + Map.entry(MV_SAMPLE_DATA.indexName, MV_SAMPLE_DATA), Map.entry(ALERTS.indexName, ALERTS), Map.entry(SAMPLE_DATA_STR.indexName, SAMPLE_DATA_STR), Map.entry(SAMPLE_DATA_TS_LONG.indexName, SAMPLE_DATA_TS_LONG), diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java index 2913401d8aab3..d6715a932c075 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java @@ -29,6 +29,7 @@ import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.index.IndexMode; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; @@ -342,7 +343,7 @@ public String toString() { public static final Configuration TEST_CFG = configuration(new 
QueryPragmas(Settings.EMPTY)); - public static final Verifier TEST_VERIFIER = new Verifier(new Metrics(new EsqlFunctionRegistry())); + public static final Verifier TEST_VERIFIER = new Verifier(new Metrics(new EsqlFunctionRegistry()), new XPackLicenseState(() -> 0L)); private EsqlTestUtils() {} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec index adbf24cee10b0..1e23cf62917fc 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec @@ -63,7 +63,6 @@ avg(salary):double | always_false:boolean in -required_capability: mv_warn from employees | keep emp_no, is_rehired, still_hired | where is_rehired in (still_hired, true) | where is_rehired != still_hired; ignoreOrder:true diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec index b8569ead94509..7bbf011176693 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/bucket.csv-spec @@ -716,3 +716,63 @@ FROM employees 2 |1985-10-01T00:00:00.000Z 4 |1985-11-01T00:00:00.000Z ; + +bucketByWeekInString +required_capability: implicit_casting_string_literal_to_temporal_amount +FROM employees +| WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" +| STATS hires_per_week = COUNT(*) BY week = BUCKET(hire_date, "1 week") +| SORT week +; + + hires_per_week:long | week:date +2 |1985-02-18T00:00:00.000Z +1 |1985-05-13T00:00:00.000Z +1 |1985-07-08T00:00:00.000Z +1 |1985-09-16T00:00:00.000Z +2 |1985-10-14T00:00:00.000Z +4 |1985-11-18T00:00:00.000Z +; + +bucketByMinuteInString +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM sample_data +| STATS min = min(@timestamp), max = MAX(@timestamp) BY bucket = BUCKET(@timestamp, "30 minutes") +| SORT min +; + + min:date | max:date | bucket:date +2023-10-23T12:15:03.360Z|2023-10-23T12:27:28.948Z|2023-10-23T12:00:00.000Z +2023-10-23T13:33:34.937Z|2023-10-23T13:55:01.543Z|2023-10-23T13:30:00.000Z +; + +bucketByMonthInString +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM sample_data +| EVAL adjusted = CASE(TO_LONG(@timestamp) % 2 == 0, @timestamp + 1 month, @timestamp + 2 years) +| STATS c = COUNT(*) BY b = BUCKET(adjusted, "1 month") +| SORT c +; + +c:long |b:date +3 |2025-10-01T00:00:00.000Z +4 |2023-11-01T00:00:00.000Z +; + +bucketWithFilteredCountRefingBucket +required_capability: implicit_casting_string_literal_to_temporal_amount + +FROM employees +| STATS c = COUNT(*) WHERE b > "1953-01-01T00:00:00.000Z" AND emp_no > 10020 BY b = BUCKET(birth_date, 1 year) +| SORT c, b +| LIMIT 4 +; + +c:long |b:date +0 |1952-01-01T00:00:00.000Z +0 |1953-01-01T00:00:00.000Z +0 |null +1 |1965-01-01T00:00:00.000Z +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/categorize.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/categorize.csv-spec index 8e0fcd78f0322..e45b10d1aa122 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/categorize.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/categorize.csv-spec @@ -1,14 +1,549 @@ -categorize -required_capability: categorize +standard aggs +required_capability: categorize_v4 FROM sample_data - | SORT message ASC - | STATS count=COUNT(), 
values=MV_SORT(VALUES(message)) BY category=CATEGORIZE(message) + | STATS count=COUNT(), + sum=SUM(event_duration), + avg=AVG(event_duration), + count_distinct=COUNT_DISTINCT(event_duration) + BY category=CATEGORIZE(message) + | SORT count DESC, category +; + +count:long | sum:long | avg:double | count_distinct:long | category:keyword + 3 | 7971589 | 2657196.3333333335 | 3 | .*?Connected.+?to.*? + 3 | 14027356 | 4675785.333333333 | 3 | .*?Connection.+?error.*? + 1 | 1232382 | 1232382.0 | 1 | .*?Disconnected.*? +; + +values aggs +required_capability: categorize_v4 + +FROM sample_data + | STATS values=MV_SORT(VALUES(message)), + top=TOP(event_duration, 2, "DESC") + BY category=CATEGORIZE(message) + | SORT category +; + +values:keyword | top:long | category:keyword +[Connected to 10.1.0.1, Connected to 10.1.0.2, Connected to 10.1.0.3] | [3450233, 2764889] | .*?Connected.+?to.*? +[Connection error] | [8268153, 5033755] | .*?Connection.+?error.*? +[Disconnected] | 1232382 | .*?Disconnected.*? +; + +mv +required_capability: categorize_v4 + +FROM mv_sample_data + | STATS COUNT(), SUM(event_duration) BY category=CATEGORIZE(message) + | SORT category +; + +COUNT():long | SUM(event_duration):long | category:keyword + 7 | 23231327 | .*?Banana.*? + 3 | 7971589 | .*?Connected.+?to.*? + 3 | 14027356 | .*?Connection.+?error.*? + 1 | 1232382 | .*?Disconnected.*? +; + +row mv +required_capability: categorize_v4 + +ROW message = ["connected to a", "connected to b", "disconnected"], str = ["a", "b", "c"] + | STATS COUNT(), VALUES(str) BY category=CATEGORIZE(message) + | SORT category +; + +COUNT():long | VALUES(str):keyword | category:keyword + 2 | [a, b, c] | .*?connected.+?to.*? + 1 | [a, b, c] | .*?disconnected.*? +; + +skips stopwords +required_capability: categorize_v4 + +ROW message = ["Mon Tue connected to a", "Jul Aug connected to b September ", "UTC connected GMT to c UTC"] + | STATS COUNT() BY category=CATEGORIZE(message) + | SORT category +; + +COUNT():long | category:keyword + 3 | .*?connected.+?to.*? +; + +with multiple indices +required_capability: categorize_v4 +required_capability: union_types + +FROM sample_data* + | STATS COUNT() BY category=CATEGORIZE(message) | SORT category ; -count:long | values:keyword | category:integer -3 | [Connected to 10.1.0.1, Connected to 10.1.0.2, Connected to 10.1.0.3] | 0 -3 | [Connection error] | 1 -1 | [Disconnected] | 2 +COUNT():long | category:keyword + 12 | .*?Connected.+?to.*? + 12 | .*?Connection.+?error.*? + 4 | .*?Disconnected.*? +; + +mv with many values +required_capability: categorize_v4 + +FROM employees + | STATS COUNT() BY category=CATEGORIZE(job_positions) + | SORT category + | LIMIT 5 +; + +COUNT():long | category:keyword + 18 | .*?Accountant.*? + 13 | .*?Architect.*? + 11 | .*?Business.+?Analyst.*? + 13 | .*?Data.+?Scientist.*? + 10 | .*?Head.+?Human.+?Resources.*? +; + +mv with many values and SUM +required_capability: categorize_v4 + +FROM employees + | STATS SUM(languages) BY category=CATEGORIZE(job_positions) + | SORT category + | LIMIT 3 +; + +SUM(languages):long | category:keyword + 43 | .*?Accountant.*? + 46 | .*?Architect.*? + 35 | .*?Business.+?Analyst.*? +; + +mv with many values and nulls and SUM +required_capability: categorize_v4 + +FROM employees + | STATS SUM(languages) BY category=CATEGORIZE(job_positions) + | SORT category DESC + | LIMIT 2 +; + +SUM(languages):long | category:keyword + 27 | null + 46 | .*?Tech.+?Lead.*? 
+;
+
+mv via eval
+required_capability: categorize_v4
+
+FROM sample_data
+  | EVAL message = MV_APPEND(message, "Banana")
+  | STATS COUNT() BY category=CATEGORIZE(message)
+  | SORT category
+;
+
+COUNT():long | category:keyword
+ 7 | .*?Banana.*?
+ 3 | .*?Connected.+?to.*?
+ 3 | .*?Connection.+?error.*?
+ 1 | .*?Disconnected.*?
+;
+
+mv via eval const
+required_capability: categorize_v4
+
+FROM sample_data
+  | EVAL message = ["Banana", "Bread"]
+  | STATS COUNT() BY category=CATEGORIZE(message)
+  | SORT category
+;
+
+COUNT():long | category:keyword
+ 7 | .*?Banana.*?
+ 7 | .*?Bread.*?
+;
+
+mv via eval const without aliases
+required_capability: categorize_v4
+
+FROM sample_data
+  | EVAL message = ["Banana", "Bread"]
+  | STATS COUNT() BY CATEGORIZE(message)
+  | SORT `CATEGORIZE(message)`
+;
+
+COUNT():long | CATEGORIZE(message):keyword
+ 7 | .*?Banana.*?
+ 7 | .*?Bread.*?
+;
+
+mv const in parameter
+required_capability: categorize_v4
+
+FROM sample_data
+  | STATS COUNT() BY c = CATEGORIZE(["Banana", "Bread"])
+  | SORT c
+;
+
+COUNT():long | c:keyword
+ 7 | .*?Banana.*?
+ 7 | .*?Bread.*?
+;
+
+agg alias shadowing
+required_capability: categorize_v4
+
+FROM sample_data
+  | STATS c = COUNT() BY c = CATEGORIZE(["Banana", "Bread"])
+  | SORT c
+;
+
+warning:Line 2:9: Field 'c' shadowed by field at line 2:24
+
+c:keyword
+.*?Banana.*?
+.*?Bread.*?
+;
+
+chained aggregations using categorize
+required_capability: categorize_v4
+
+FROM sample_data
+  | STATS COUNT() BY category=CATEGORIZE(message)
+  | STATS COUNT() BY category=CATEGORIZE(category)
+  | SORT category
+;
+
+COUNT():long | category:keyword
+ 1 | .*?Connected.+?to.*?
+ 1 | .*?Connection.+?error.*?
+ 1 | .*?Disconnected.*?
+;
+
+stats without aggs
+required_capability: categorize_v4
+
+FROM sample_data
+  | STATS BY category=CATEGORIZE(message)
+  | SORT category
+;
+
+category:keyword
+.*?Connected.+?to.*?
+.*?Connection.+?error.*?
+.*?Disconnected.*?
+;
+
+text field
+required_capability: categorize_v4
+
+FROM hosts
+  | STATS COUNT() BY category=CATEGORIZE(host_group)
+  | SORT category
+;
+
+COUNT():long | category:keyword
+ 2 | .*?Gateway.+?instances.*?
+ 5 | .*?Kubernetes.+?cluster.*?
+ 2 | .*?servers.*?
+ 1 | null
+
+// Note: DB is removed from "DB servers", because the ml_standard
+// tokenizer drops numbers, including hexadecimal ones.
+;
+
+on TO_UPPER
+required_capability: categorize_v4
+
+FROM sample_data
+  | STATS COUNT() BY category=CATEGORIZE(TO_UPPER(message))
+  | SORT category
+;
+
+COUNT():long | category:keyword
+ 3 | .*?CONNECTED.+?TO.*?
+ 3 | .*?CONNECTION.+?ERROR.*?
+ 1 | .*?DISCONNECTED.*?
+;
+
+on CONCAT
+required_capability: categorize_v4
+
+FROM sample_data
+  | STATS COUNT() BY category=CATEGORIZE(CONCAT(message, " banana"))
+  | SORT category
+;
+
+COUNT():long | category:keyword
+ 3 | .*?Connected.+?to.+?banana.*?
+ 3 | .*?Connection.+?error.+?banana.*?
+ 1 | .*?Disconnected.+?banana.*?
+;
+
+on CONCAT with unicode
+required_capability: categorize_v4
+
+FROM sample_data
+  | STATS COUNT() BY category=CATEGORIZE(CONCAT(message, " 👍🏽😊"))
+  | SORT category
+;
+
+COUNT():long | category:keyword
+ 3 | .*?Connected.+?to.*?
+ 3 | .*?Connection.+?error.*?
+ 1 | .*?Disconnected.*?
+;
+
+on REVERSE(CONCAT())
+required_capability: categorize_v4
+
+FROM sample_data
+  | STATS COUNT() BY category=CATEGORIZE(REVERSE(CONCAT(message, " 👍🏽😊")))
+  | SORT category
+;
+
+COUNT():long | category:keyword
+ 1 | .*?detcennocsiD.*?
+ 3 | .*?ot.+?detcennoC.*?
+ 3 | .*?rorre.+?noitcennoC.*?
+;
+
+and then TO_LOWER
+required_capability: categorize_v4
+
+FROM sample_data
+  | STATS COUNT() BY category=CATEGORIZE(message)
+  | EVAL category=TO_LOWER(category)
+  | SORT category
+;
+
+COUNT():long | category:keyword
+ 3 | .*?connected.+?to.*?
+ 3 | .*?connection.+?error.*?
+ 1 | .*?disconnected.*?
+;
+
+on const empty string
+required_capability: categorize_v4
+
+FROM sample_data
+  | STATS COUNT() BY category=CATEGORIZE("")
+  | SORT category
+;
+
+COUNT():long | category:keyword
+ 7 | null
+;
+
+on const empty string from eval
+required_capability: categorize_v4
+
+FROM sample_data
+  | EVAL x = ""
+  | STATS COUNT() BY category=CATEGORIZE(x)
+  | SORT category
+;
+
+COUNT():long | category:keyword
+ 7 | null
+;
+
+on null
+required_capability: categorize_v4
+
+FROM sample_data
+  | EVAL x = null
+  | STATS COUNT(), SUM(event_duration) BY category=CATEGORIZE(x)
+  | SORT category
+;
+
+COUNT():long | SUM(event_duration):long | category:keyword
+ 7 | 23231327 | null
+;
+
+on null string
+required_capability: categorize_v4
+
+FROM sample_data
+  | EVAL x = null::string
+  | STATS COUNT() BY category=CATEGORIZE(x)
+  | SORT category
+;
+
+COUNT():long | category:keyword
+ 7 | null
+;
+
+filtering out all data
+required_capability: categorize_v4
+
+FROM sample_data
+  | WHERE @timestamp < "2023-10-23T00:00:00Z"
+  | STATS COUNT() BY category=CATEGORIZE(message)
+  | SORT category
+;
+
+COUNT():long | category:keyword
+;
+
+filtering out all data with constant
+required_capability: categorize_v4
+
+FROM sample_data
+  | STATS COUNT() BY category=CATEGORIZE(message)
+  | WHERE false
+;
+
+COUNT():long | category:keyword
+;
+
+drop output columns
+required_capability: categorize_v4
+
+FROM sample_data
+  | STATS count=COUNT() BY category=CATEGORIZE(message)
+  | EVAL x=1
+  | DROP count, category
+;
+
+x:integer
+1
+1
+1
+;
+
+category value processing
+required_capability: categorize_v4
+
+ROW message = ["connected to a", "connected to b", "disconnected"]
+  | STATS COUNT() BY category=CATEGORIZE(message)
+  | EVAL category = TO_UPPER(category)
+  | SORT category
+;
+
+COUNT():long | category:keyword
+ 2 | .*?CONNECTED.+?TO.*?
+ 1 | .*?DISCONNECTED.*?
+;
+
+row aliases
+required_capability: categorize_v4
+
+ROW message = "connected to xyz"
+  | EVAL x = message
+  | STATS COUNT() BY category=CATEGORIZE(x)
+  | EVAL y = category
+  | SORT y
+;
+
+COUNT():long | category:keyword | y:keyword
+ 1 | .*?connected.+?to.+?xyz.*? | .*?connected.+?to.+?xyz.*?
+;
+
+from aliases
+required_capability: categorize_v4
+
+FROM sample_data
+  | EVAL x = message
+  | STATS COUNT() BY category=CATEGORIZE(x)
+  | EVAL y = category
+  | SORT y
+;
+
+COUNT():long | category:keyword | y:keyword
+ 3 | .*?Connected.+?to.*? | .*?Connected.+?to.*?
+ 3 | .*?Connection.+?error.*? | .*?Connection.+?error.*?
+ 1 | .*?Disconnected.*? | .*?Disconnected.*?
+;
+
+row aliases with keep
+required_capability: categorize_v4
+
+ROW message = "connected to xyz"
+  | EVAL x = message
+  | KEEP x
+  | STATS COUNT() BY category=CATEGORIZE(x)
+  | EVAL y = category
+  | KEEP `COUNT()`, y
+  | SORT y
+;
+
+COUNT():long | y:keyword
+ 1 | .*?connected.+?to.+?xyz.*?
+;
+
+from aliases with keep
+required_capability: categorize_v4
+
+FROM sample_data
+  | EVAL x = message
+  | KEEP x
+  | STATS COUNT() BY category=CATEGORIZE(x)
+  | EVAL y = category
+  | KEEP `COUNT()`, y
+  | SORT y
+;
+
+COUNT():long | y:keyword
+ 3 | .*?Connected.+?to.*?
+ 3 | .*?Connection.+?error.*?
+ 1 | .*?Disconnected.*?
+;
+
+row rename
+required_capability: categorize_v4
+
+ROW message = "connected to xyz"
+  | RENAME message as x
+  | STATS COUNT() BY category=CATEGORIZE(x)
+  | RENAME category as y
+  | SORT y
+;
+
+COUNT():long | y:keyword
+ 1 | .*?connected.+?to.+?xyz.*?
+;
+
+from rename
+required_capability: categorize_v4
+
+FROM sample_data
+  | RENAME message as x
+  | STATS COUNT() BY category=CATEGORIZE(x)
+  | RENAME category as y
+  | SORT y
+;
+
+COUNT():long | y:keyword
+ 3 | .*?Connected.+?to.*?
+ 3 | .*?Connection.+?error.*?
+ 1 | .*?Disconnected.*?
+;
+
+row drop
+required_capability: categorize_v4
+
+ROW message = "connected to a"
+  | STATS c = COUNT() BY category=CATEGORIZE(message)
+  | DROP category
+  | SORT c
+;
+
+c:long
+1
+;
+
+from drop
+required_capability: categorize_v4
+
+FROM sample_data
+  | STATS c = COUNT() BY category=CATEGORIZE(message)
+  | DROP category
+  | SORT c
+;
+
+c:long
+1
+3
+3
 ;
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec
index 237c6a9af197f..734e2ef5e475e 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec
@@ -216,7 +216,6 @@ string:keyword |datetime:date
 ;
 
 convertFromUnsignedLong
-required_capability: convert_warn
 
 row ul = [9223372036854775808, 520128000000] | eval dt = to_datetime(ul);
 warningRegex:Line 1:58: evaluation of \[to_datetime\(ul\)\] failed, treating result as null. Only first 20 failures recorded.
@@ -1286,3 +1285,108 @@ ROW a = GREATEST(TO_DATETIME("1957-05-23T00:00:00Z"), TO_DATETIME("1958-02-19T00
 a:datetime
 1958-02-19T00:00:00
 ;
+
+evalDateTruncMonthInString
+required_capability: implicit_casting_string_literal_to_temporal_amount
+
+FROM employees
+| SORT hire_date
+| EVAL x = date_trunc("1 month", hire_date)
+| KEEP emp_no, hire_date, x
+| LIMIT 5;
+
+emp_no:integer | hire_date:date | x:date
+10009 | 1985-02-18T00:00:00.000Z | 1985-02-01T00:00:00.000Z
+10048 | 1985-02-24T00:00:00.000Z | 1985-02-01T00:00:00.000Z
+10098 | 1985-05-13T00:00:00.000Z | 1985-05-01T00:00:00.000Z
+10076 | 1985-07-09T00:00:00.000Z | 1985-07-01T00:00:00.000Z
+10061 | 1985-09-17T00:00:00.000Z | 1985-09-01T00:00:00.000Z
+;
+
+evalDateTruncHourInString
+required_capability: implicit_casting_string_literal_to_temporal_amount
+
+FROM employees
+| SORT hire_date
+| EVAL x = date_trunc("240 hours", hire_date)
+| KEEP emp_no, hire_date, x
+| LIMIT 5;
+
+emp_no:integer | hire_date:date | x:date
+10009 | 1985-02-18T00:00:00.000Z | 1985-02-11T00:00:00.000Z
+10048 | 1985-02-24T00:00:00.000Z | 1985-02-21T00:00:00.000Z
+10098 | 1985-05-13T00:00:00.000Z | 1985-05-12T00:00:00.000Z
+10076 | 1985-07-09T00:00:00.000Z | 1985-07-01T00:00:00.000Z
+10061 | 1985-09-17T00:00:00.000Z | 1985-09-09T00:00:00.000Z
+;
+
+evalDateTruncDayInString
+required_capability: implicit_casting_string_literal_to_temporal_amount
+
+FROM sample_data
+| SORT @timestamp ASC
+| EVAL t = DATE_TRUNC("1 day", @timestamp)
+| KEEP t;
+
+t:date
+2023-10-23T00:00:00
+2023-10-23T00:00:00
+2023-10-23T00:00:00
+2023-10-23T00:00:00
+2023-10-23T00:00:00
+2023-10-23T00:00:00
+2023-10-23T00:00:00
+;
+
+evalDateTruncMinuteInString
+required_capability: implicit_casting_string_literal_to_temporal_amount
+
+FROM sample_data
+| SORT @timestamp ASC
+| EVAL t = DATE_TRUNC("1 minute", @timestamp)
+| KEEP t;
+
+t:date
+2023-10-23T12:15:00
+2023-10-23T12:27:00
+2023-10-23T13:33:00
+2023-10-23T13:51:00
+2023-10-23T13:52:00
+2023-10-23T13:53:00
+2023-10-23T13:55:00
+;
+
+evalDateTruncDayInStringNull
+required_capability: implicit_casting_string_literal_to_temporal_amount
+
+FROM employees
+| WHERE emp_no == 10040
+| EVAL x = date_trunc("1 day", birth_date)
+| KEEP emp_no, birth_date, x;
+
+emp_no:integer | birth_date:date | x:date
+10040 | null | null
+;
+
+evalDateTruncYearInString
+required_capability: implicit_casting_string_literal_to_temporal_amount
+
+ROW a = 1
+| EVAL year_hired = DATE_TRUNC("1 year", "1991-06-26T00:00:00.000Z")
+;
+
+a:integer | year_hired:date
+1 | 1991-01-01T00:00:00.000Z
+;
+
+filteringWithTemporalAmountInString
+required_capability: implicit_casting_string_literal_to_temporal_amount
+
+FROM employees
+| SORT emp_no
+| WHERE birth_date < "2024-01-01" - 70 years
+| STATS cnt = count(*);
+
+cnt:long
+19
+;
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec
index a53777cff7c71..a6e1a771374ca 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec
@@ -663,7 +663,7 @@ required_capability: fn_bit_length
 FROM airports
 | WHERE country == "India"
 | KEEP city
-| EVAL fn_length=LENGTH(city), fn_bit_length = BIT_LENGTH(city)
+| EVAL fn_length = LENGTH(city), fn_bit_length = BIT_LENGTH(city)
 // end::bitLength[]
 | SORT city
 | LIMIT 3
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec
index 3c38bd190b0b1..25b114b5d1daf 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec
@@ -580,7 +580,6 @@ CPH | Copenhagen | POINT(12.5683 55.6761) | Denmark
 
 spatialEnrichmentGeoMatchStats#[skip:-8.13.99, reason:ENRICH extended in 8.14.0]
 required_capability: enrich_load
-required_capability: mv_warn
 
 FROM airports
 | ENRICH city_boundaries ON city_location WITH airport, region, city_boundary
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec
index fc2350491db91..592b06107c8b5 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec
@@ -379,7 +379,7 @@ required_capability: fn_byte_length
 FROM airports
 | WHERE country == "India"
 | KEEP city
-| EVAL fn_length=LENGTH(city), fn_byte_length = BYTE_LENGTH(city)
+| EVAL fn_length = LENGTH(city), fn_byte_length = BYTE_LENGTH(city)
 // end::byteLength[]
 | SORT city
 | LIMIT 3
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec
index 537b69547c6be..3505b52e5599e 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec
@@ -99,7 +99,6 @@ int:integer |dbl:double
 ;
 
 lessThanMultivalue
-required_capability: mv_warn
 
 from employees | where salary_change < 1 | keep emp_no, salary_change | sort emp_no | limit 5;
 warningRegex:evaluation of \[salary_change < 1\] failed, treating result as null. Only first 20 failures recorded.
@@ -115,7 +114,6 @@ emp_no:integer |salary_change:double ; greaterThanMultivalue -required_capability: mv_warn from employees | where salary_change > 1 | keep emp_no, salary_change | sort emp_no | limit 5; warningRegex:evaluation of \[salary_change > 1\] failed, treating result as null. Only first 20 failures recorded. @@ -131,7 +129,6 @@ emp_no:integer |salary_change:double ; equalToMultivalue -required_capability: mv_warn from employees | where salary_change == 1.19 | keep emp_no, salary_change | sort emp_no; warning:Line 1:24: evaluation of [salary_change == 1.19] failed, treating result as null. Only first 20 failures recorded. @@ -143,7 +140,6 @@ emp_no:integer |salary_change:double ; equalToOrEqualToMultivalue -required_capability: mv_warn from employees | where salary_change == 1.19 or salary_change == 7.58 | keep emp_no, salary_change | sort emp_no; warning:Line 1:24: evaluation of [salary_change] failed, treating result as null. Only first 20 failures recorded. @@ -156,7 +152,6 @@ emp_no:integer |salary_change:double ; inMultivalue -required_capability: mv_warn from employees | where salary_change in (1.19, 7.58) | keep emp_no, salary_change | sort emp_no; warning:Line 1:24: evaluation of [salary_change in (1.19, 7.58)] failed, treating result as null. Only first 20 failures recorded. @@ -169,7 +164,6 @@ emp_no:integer |salary_change:double ; notLessThanMultivalue -required_capability: mv_warn from employees | where not(salary_change < 1) | keep emp_no, salary_change | sort emp_no | limit 5; warningRegex:evaluation of \[.*salary_change < 1.*\] failed, treating result as null. Only first 20 failures recorded. @@ -185,7 +179,6 @@ emp_no:integer |salary_change:double ; notGreaterThanMultivalue -required_capability: mv_warn from employees | where not(salary_change > 1) | keep emp_no, salary_change | sort emp_no | limit 5; warningRegex:evaluation of \[.*salary_change > 1.*\] failed, treating result as null. Only first 20 failures recorded. @@ -201,7 +194,6 @@ emp_no:integer |salary_change:double ; notEqualToMultivalue -required_capability: mv_warn from employees | where not(salary_change == 1.19) | keep emp_no, salary_change | sort emp_no | limit 5; warningRegex:evaluation of \[.*salary_change == 1.19.*\] failed, treating result as null. Only first 20 failures recorded. diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec-ignored similarity index 100% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/inlinestats.csv-spec-ignored diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec index b399734151412..f4b6d41a7a027 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec @@ -1,7 +1,6 @@ // Integral types-specific tests inLongAndInt -required_capability: mv_warn from employees | where avg_worked_seconds in (372957040, salary_change.long, 236703986) | where emp_no in (10017, emp_no - 1) | keep emp_no, avg_worked_seconds; warningRegex:evaluation of \[avg_worked_seconds in \(372957040, salary_change.long, 236703986\)\] failed, treating result as null. Only first 20 failures recorded. 
@@ -68,7 +67,6 @@ long:long |ul:ul ; convertDoubleToUL -required_capability: convert_warn row d = 123.4 | eval ul = to_ul(d), overflow = to_ul(1e20); warningRegex:Line 1:48: evaluation of \[to_ul\(1e20\)\] failed, treating result as null. Only first 20 failures recorded. @@ -127,7 +125,6 @@ int:integer |long:long ; convertULToLong -required_capability: convert_warn row ul = [9223372036854775807, 9223372036854775808] | eval long = to_long(ul); warningRegex:Line 1:67: evaluation of \[to_long\(ul\)\] failed, treating result as null. Only first 20 failures recorded. @@ -170,7 +167,6 @@ str1:keyword |str2:keyword |str3:keyword |long1:long |long2:long |long3:long ; convertDoubleToLong -required_capability: convert_warn row d = 123.4 | eval d2l = to_long(d), overflow = to_long(1e19); warningRegex:Line 1:51: evaluation of \[to_long\(1e19\)\] failed, treating result as null. Only first 20 failures recorded. @@ -190,7 +186,6 @@ int:integer |ii:integer ; convertLongToInt -required_capability: convert_warn // tag::to_int-long[] ROW long = [5013792, 2147483647, 501379200000] @@ -207,7 +202,6 @@ long:long |int:integer ; convertULToInt -required_capability: convert_warn row ul = [2147483647, 9223372036854775808] | eval int = to_int(ul); warningRegex:Line 1:57: evaluation of \[to_int\(ul\)\] failed, treating result as null. Only first 20 failures recorded. @@ -239,7 +233,6 @@ int_str:keyword |int_dbl_str:keyword |is2i:integer|ids2i:integer ; convertStringToIntFail#[skip:-8.13.99, reason:warning changed in 8.14] -required_capability: mv_warn row str1 = "2147483647.2", str2 = "2147483648", non = "no number" | eval i1 = to_integer(str1), i2 = to_integer(str2), noi = to_integer(non); warningRegex:Line 1:79: evaluation of \[to_integer\(str1\)\] failed, treating result as null. Only first 20 failures recorded. @@ -254,7 +247,6 @@ str1:keyword |str2:keyword |non:keyword |i1:integer |i2:integer | ; convertDoubleToInt -required_capability: convert_warn row d = 123.4 | eval d2i = to_integer(d), overflow = to_integer(1e19); warningRegex:Line 1:54: evaluation of \[to_integer\(1e19\)\] failed, treating result as null. Only first 20 failures recorded. @@ -265,7 +257,6 @@ d:double |d2i:integer |overflow:integer ; lessThanMultivalue -required_capability: mv_warn from employees | where salary_change.int < 1 | keep emp_no, salary_change.int | sort emp_no | limit 5; warningRegex:evaluation of \[salary_change.int < 1\] failed, treating result as null. Only first 20 failures recorded. @@ -281,7 +272,6 @@ emp_no:integer |salary_change.int:integer ; greaterThanMultivalue -required_capability: mv_warn from employees | where salary_change.int > 1 | keep emp_no, salary_change.int | sort emp_no | limit 5; warningRegex:evaluation of \[salary_change.int > 1\] failed, treating result as null. Only first 20 failures recorded. @@ -297,7 +287,6 @@ emp_no:integer |salary_change.int:integer ; equalToMultivalue -required_capability: mv_warn from employees | where salary_change.int == 0 | keep emp_no, salary_change.int | sort emp_no; warningRegex:evaluation of \[salary_change.int == 0\] failed, treating result as null. Only first 20 failures recorded. @@ -312,7 +301,6 @@ emp_no:integer |salary_change.int:integer ; equalToOrEqualToMultivalue -required_capability: mv_warn from employees | where salary_change.int == 1 or salary_change.int == 8 | keep emp_no, salary_change.int | sort emp_no; warningRegex:evaluation of \[salary_change.int\] failed, treating result as null. Only first 20 failures recorded. 
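(Illustrative, not part of the patch: the convert* specs in this hunk assert ESQL's overflow behavior for numeric casts — a value that cannot fit the target type becomes null and a warning is recorded, rather than wrapping around. Mirroring convertDoubleToLong:)

ROW d = 123.4
| EVAL d2l = TO_LONG(d),           // converts normally
       overflow = TO_LONG(1e19)    // null: 1e19 exceeds the long range; a warning is recorded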
@@ -325,7 +313,6 @@ emp_no:integer |salary_change.int:integer ; inMultivalue -required_capability: mv_warn from employees | where salary_change.int in (1, 7) | keep emp_no, salary_change.int | sort emp_no; warningRegex:evaluation of \[salary_change.int in \(1, 7\)\] failed, treating result as null. Only first 20 failures recorded. @@ -338,7 +325,6 @@ emp_no:integer |salary_change.int:integer ; notLessThanMultivalue -required_capability: mv_warn from employees | where not(salary_change.int < 1) | keep emp_no, salary_change.int | sort emp_no | limit 5; warningRegex:evaluation of \[.*salary_change.int < 1.*\] failed, treating result as null. Only first 20 failures recorded. @@ -354,7 +340,6 @@ emp_no:integer |salary_change.int:integer ; notGreaterThanMultivalue -required_capability: mv_warn from employees | where not(salary_change.int > 1) | keep emp_no, salary_change.int | sort emp_no | limit 5; warningRegex:evaluation of \[.*salary_change.int > 1.*\] failed, treating result as null. Only first 20 failures recorded. @@ -370,7 +355,6 @@ emp_no:integer |salary_change.int:integer ; notEqualToMultivalue -required_capability: mv_warn from employees | where not(salary_change.int == 1) | keep emp_no, salary_change.int | sort emp_no | limit 5; warningRegex:evaluation of \[.*salary_change.int == 1.*\] failed, treating result as null. Only first 20 failures recorded diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec index 0fb6994ef759f..4418f7e0aa7ed 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec @@ -16,7 +16,6 @@ eth2 |epsilon |[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece ; equals -required_capability: mv_warn from hosts | sort host, card | where ip0 == ip1 | keep card, host, ip0, ip1; warningRegex:evaluation of \[ip0 == ip1\] failed, treating result as null. Only first 20 failures recorded. @@ -60,7 +59,6 @@ eth2 |epsilon |[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece ; lessThan -required_capability: mv_warn from hosts | sort host, card, ip1 | where ip0 < ip1 | keep card, host, ip0, ip1; warningRegex:evaluation of \[ip0 < ip1\] failed, treating result as null. Only first 20 failures recorded. @@ -73,7 +71,6 @@ lo0 |gamma |fe80::cae2:65ff:fece:feb9|fe81::cae2:65ff:fece:f ; notEquals -required_capability: mv_warn from hosts | sort host, card, ip1 | where ip0 != ip1 | keep card, host, ip0, ip1; warningRegex:evaluation of \[ip0 != ip1\] failed, treating result as null. Only first 20 failures recorded. 
@@ -125,7 +122,6 @@ null |[127.0.0.1, 127.0.0.2, 127.0.0.3] ; conditional -required_capability: mv_warn from hosts | eval eq=case(ip0==ip1, ip0, ip1) | keep eq, ip0, ip1; ignoreOrder:true @@ -146,7 +142,6 @@ fe80::cae2:65ff:fece:fec1 |[fe80::cae2:65ff:fece:feb ; in -required_capability: mv_warn from hosts | eval eq=case(ip0==ip1, ip0, ip1) | where eq in (ip0, ip1) | keep card, host, ip0, ip1, eq; ignoreOrder:true @@ -168,7 +163,6 @@ eth0 |epsilon |[fe80::cae2:65ff:fece:feb9, fe80::cae2:65ff:fece inWithWarningsRegex#[skip:-8.13.99, reason:regex warnings in tests introduced in v 8.14.0] -required_capability: mv_warn from hosts | eval eq=case(ip0==ip1, ip0, ip1) | where eq in (ip0, ip1) | keep card, host, ip0, ip1, eq; ignoreOrder:true @@ -188,7 +182,6 @@ eth0 |epsilon |[fe80::cae2:65ff:fece:feb9, fe80::cae2:65ff:fece ; cidrMatchSimple -required_capability: mv_warn from hosts | where cidr_match(ip1, "127.0.0.2/32") | keep card, host, ip0, ip1; warningRegex:evaluation of \[cidr_match\(ip1, \\\"127.0.0.2/32\\\"\)\] failed, treating result as null. Only first 20 failures recorded. @@ -199,7 +192,6 @@ eth1 |beta |127.0.0.1 |127.0.0.2 ; cidrMatchNullField -required_capability: mv_warn from hosts | where cidr_match(ip0, "127.0.0.2/32") is null | keep card, host, ip0, ip1; ignoreOrder:true @@ -213,7 +205,6 @@ eth2 |epsilon |[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece ; cdirMatchMultipleArgs -required_capability: mv_warn //tag::cdirMatchMultipleArgs[] FROM hosts @@ -233,7 +224,6 @@ eth0 |gamma |fe80::cae2:65ff:fece:feb9|127.0.0.3 ; cidrMatchFunctionArg -required_capability: mv_warn from hosts | where cidr_match(ip1, concat("127.0.0.2", "/32"), "127.0.0.3/32") | keep card, host, ip0, ip1; ignoreOrder:true @@ -246,7 +236,6 @@ eth0 |gamma |fe80::cae2:65ff:fece:feb9|127.0.0.3 ; cidrMatchFieldArg -required_capability: mv_warn from hosts | eval cidr="127.0.0.2" | where cidr_match(ip1, cidr, "127.0.0.3/32") | keep card, host, ip0, ip1; ignoreOrder:true @@ -366,7 +355,6 @@ eth0 |beta |127.0.0.1 |::1 ; pushDownIPWithIn -required_capability: mv_warn from hosts | where ip1 in (to_ip("::1"), to_ip("127.0.0.1")) | keep card, host, ip0, ip1; ignoreOrder:true @@ -380,7 +368,6 @@ eth0 |beta |127.0.0.1 |::1 ; pushDownIPWithComparision -required_capability: mv_warn from hosts | where ip1 > to_ip("127.0.0.1") | keep card, ip1; ignoreOrder:true diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/kql-function.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/kql-function.csv-spec new file mode 100644 index 0000000000000..02be58efac774 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/kql-function.csv-spec @@ -0,0 +1,153 @@ +############################################### +# Tests for KQL function +# + +kqlWithField +required_capability: kql_function + +// tag::kql-with-field[] +FROM books +| WHERE KQL("author: Faulkner") +| KEEP book_no, author +| SORT book_no +| LIMIT 5; +// end::kql-with-field[] + +// tag::kql-with-field-result[] +book_no:keyword | author:text +2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] +2713 | William Faulkner +2847 | Colleen Faulkner +2883 | William Faulkner +3293 | Danny Faulkner +; +// end::kql-with-field-result[] + +kqlWithMultipleFields +required_capability: kql_function + +from books +| where kql("title:Return* AND author:*Tolkien") +| keep book_no, title; +ignoreOrder:true + +book_no:keyword | title:text +2714 | Return of the King Being the Third Part of The Lord of the Rings +7350 | Return of the Shadow +; + 
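(Illustrative, not part of the patch: as kqlWithMultipleFields above shows, the KQL() argument accepts full Kibana Query Language syntax — field-scoped terms, wildcards, and boolean operators — so a single predicate can constrain several fields at once:)

FROM books
| WHERE KQL("title:Return* AND author:*Tolkien")   // wildcards on both fields, joined with AND
| KEEP book_no, title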
+kqlWithQueryExpressions +required_capability: kql_function + +from books +| where kql(CONCAT("title:Return*", " AND author:*Tolkien")) +| keep book_no, title; +ignoreOrder:true + +book_no:keyword | title:text +2714 | Return of the King Being the Third Part of The Lord of the Rings +7350 | Return of the Shadow +; + +kqlWithConjunction +required_capability: kql_function + +from books +| where kql("title: Rings") and ratings > 4.6 +| keep book_no, title; +ignoreOrder:true + +book_no:keyword | title:text +4023 | A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings +7140 | The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1) +; + +kqlWithFunctionPushedToLucene +required_capability: kql_function + +from hosts +| where kql("host: beta") and cidr_match(ip1, "127.0.0.2/32", "127.0.0.3/32") +| keep card, host, ip0, ip1; +ignoreOrder:true + +card:keyword |host:keyword |ip0:ip |ip1:ip +eth1 |beta |127.0.0.1 |127.0.0.2 +; + +kqlWithNonPushableConjunction +required_capability: kql_function + +from books +| where kql("title: Rings") and length(title) > 75 +| keep book_no, title; +ignoreOrder:true + +book_no:keyword | title:text +4023 |A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings +; + +kqlWithMultipleWhereClauses +required_capability: kql_function + +from books +| where kql("title: rings") +| where kql("year > 1 AND year < 2005") +| keep book_no, title; +ignoreOrder:true + +book_no:keyword | title:text +4023 | A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings +7140 | The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1) +; + + +kqlWithMultivaluedTextField +required_capability: kql_function + +from employees +| where kql("job_positions: Tech Lead AND job_positions:(Reporting Analyst)") +| keep emp_no, first_name, last_name; +ignoreOrder:true + +emp_no:integer | first_name:keyword | last_name:keyword +10004 | Chirstian | Koblick +10010 | Duangkaew | Piveteau +10011 | Mary | Sluis +10088 | Jungsoon | Syrzycki +10093 | Sailaja | Desikan +10097 | Remzi | Waschkowski +; + +kqlWithMultivaluedNumericField +required_capability: kql_function + +from employees +| where kql("salary_change > 14") +| keep emp_no, first_name, last_name, salary_change; +ignoreOrder:true + +emp_no:integer | first_name:keyword | last_name:keyword | salary_change:double +10003 | Parto | Bamford | [12.82, 14.68] +10015 | Guoxiang | Nooteboom | [12.4, 14.25] +10023 | Bojan | Montemayor | [0.8, 14.63] +10040 | Weiyi | Meriste | [-8.94, 1.92, 6.97, 14.74] +10061 | Tse | Herber | [-2.58, -0.95, 14.39] +10065 | Satosi | Awdeh | [-9.81, -1.47, 14.44] +10099 | Valter | Sullins | [-8.78, -3.98, 10.71, 14.26] +; + +testMultiValuedFieldWithConjunction +required_capability: kql_function + +from employees +| where (kql("job_positions: (Data Scientist) OR job_positions:(Support Engineer)")) and gender == "F" +| keep emp_no, first_name, last_name; +ignoreOrder:true + +emp_no:integer | first_name:keyword | last_name:keyword +10023 | Bojan | Montemayor +10041 | Uri | Lenart +10044 | Mingsen | Casley +10053 | Sanjiv | Zschoche +10069 | Margareta | Bierman +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages_lookup-settings.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages_lookup-settings.json new file mode 100644 index 0000000000000..b73d1f9accf92 --- /dev/null +++ 
b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages_lookup-settings.json @@ -0,0 +1,5 @@ +{ + "index": { + "mode": "lookup" + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec new file mode 100644 index 0000000000000..5de353978b307 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec @@ -0,0 +1,87 @@ +// +// CSV spec for LOOKUP JOIN command +// Reuses the sample dataset and commands from enrich.csv-spec +// + +//TODO: this sometimes returns null instead of the looked up value (likely related to the execution order) +basicOnTheDataNode-Ignore +required_capability: join_lookup_v3 + +FROM employees +| EVAL language_code = languages +| LOOKUP JOIN languages_lookup ON language_code +| WHERE emp_no >= 10091 AND emp_no < 10094 +| SORT emp_no +| KEEP emp_no, language_code, language_name +; + +emp_no:integer | language_code:integer | language_name:keyword +10091 | 3 | Spanish +10092 | 1 | English +10093 | 3 | Spanish +; + +basicRow-Ignore +required_capability: join_lookup_v3 + +ROW language_code = 1 +| LOOKUP JOIN languages_lookup ON language_code +; + +language_code:keyword | language_name:keyword +1 | English +; + +basicOnTheCoordinator +required_capability: join_lookup_v3 + +FROM employees +| SORT emp_no +| LIMIT 3 +| EVAL language_code = languages +| LOOKUP JOIN languages_lookup ON language_code +| KEEP emp_no, language_code, language_name +; + +emp_no:integer | language_code:integer | language_name:keyword +10001 | 2 | French +10002 | 5 | null +10003 | 4 | German +; + +//TODO: this sometimes returns null instead of the looked up value (likely related to the execution order) +subsequentEvalOnTheDataNode-Ignore +required_capability: join_lookup_v3 + +FROM employees +| EVAL language_code = languages +| LOOKUP JOIN languages_lookup ON language_code +| WHERE emp_no >= 10091 AND emp_no < 10094 +| SORT emp_no +| KEEP emp_no, language_code, language_name +| EVAL language_name = TO_LOWER(language_name), language_code_x2 = 2*language_code +; + +emp_no:integer | language_code:integer | language_name:keyword | language_code_x2:integer +10091 | 3 | spanish | 6 +10092 | 1 | english | 2 +10093 | 3 | spanish | 6 +; + +subsequentEvalOnTheCoordinator +required_capability: join_lookup_v3 + +FROM employees +| SORT emp_no +| LIMIT 3 +| EVAL language_code = languages +| LOOKUP JOIN languages_lookup ON language_code +| KEEP emp_no, language_code, language_name +| EVAL language_name = TO_LOWER(language_name), language_code_x2 = 2*language_code +; + +emp_no:integer | language_code:integer | language_name:keyword | language_code_x2:integer +10001 | 2 | french | 4 +10002 | 5 | null | 10 +10003 | 4 | german | 8 +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup.csv-spec-ignored similarity index 80% rename from x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup.csv-spec rename to x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup.csv-spec-ignored index 9cf96f7c0b6de..685e3ab2778e1 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup.csv-spec-ignored @@ -4,7 +4,7 @@ FROM employees | SORT emp_no | LIMIT 4 | RENAME languages AS int -| LOOKUP int_number_names ON int +| LOOKUP_🐔 int_number_names ON int | RENAME int AS languages, name AS 
lang_name | KEEP emp_no, languages, lang_name ; @@ -19,7 +19,7 @@ emp_no:integer | languages:integer | lang_name:keyword keywordByMvIntAndQuotedSource required_capability: lookup_v4 ROW int=[1, 2, 3] -| LOOKUP "int_number_names" ON int +| LOOKUP_🐔 "int_number_names" ON int ; int:integer | name:keyword @@ -29,7 +29,7 @@ int:integer | name:keyword keywordByDupeIntAndTripleQuotedSource required_capability: lookup_v4 ROW int=[1, 1, 1] -| LOOKUP """int_number_names""" ON int +| LOOKUP_🐔 """int_number_names""" ON int ; int:integer | name:keyword @@ -39,10 +39,10 @@ int:integer | name:keyword intByKeyword required_capability: lookup_v4 ROW name="two" -| LOOKUP int_number_names ON name +| LOOKUP_🐔 int_number_names ON name ; -name:keyword | int:integer +name:keyword | int:integer two | 2 ; @@ -53,7 +53,7 @@ FROM employees | SORT emp_no | LIMIT 4 | RENAME languages.long AS long -| LOOKUP long_number_names ON long +| LOOKUP_🐔 long_number_names ON long | RENAME long AS languages, name AS lang_name | KEEP emp_no, languages, lang_name ; @@ -68,7 +68,7 @@ emp_no:integer | languages:long | lang_name:keyword longByKeyword required_capability: lookup_v4 ROW name="two" -| LOOKUP long_number_names ON name +| LOOKUP_🐔 long_number_names ON name ; name:keyword | long:long @@ -81,7 +81,7 @@ FROM employees | SORT emp_no | LIMIT 4 | RENAME height AS double -| LOOKUP double_number_names ON double +| LOOKUP_🐔 double_number_names ON double | RENAME double AS height, name AS height_name | KEEP emp_no, height, height_name ; @@ -96,7 +96,7 @@ emp_no:integer | height:double | height_name:keyword floatByKeyword required_capability: lookup_v4 ROW name="two point zero eight" -| LOOKUP double_number_names ON name +| LOOKUP_🐔 double_number_names ON name ; name:keyword | double:double @@ -106,7 +106,7 @@ two point zero eight | 2.08 floatByNullMissing required_capability: lookup_v4 ROW name=null -| LOOKUP double_number_names ON name +| LOOKUP_🐔 double_number_names ON name ; name:null | double:double @@ -116,7 +116,7 @@ name:null | double:double floatByNullMatching required_capability: lookup_v4 ROW name=null -| LOOKUP double_number_names_with_null ON name +| LOOKUP_🐔 double_number_names_with_null ON name ; name:null | double:double @@ -126,7 +126,7 @@ name:null | double:double intIntByKeywordKeyword required_capability: lookup_v4 ROW aa="foo", ab="zoo" -| LOOKUP big ON aa, ab +| LOOKUP_🐔 big ON aa, ab ; aa:keyword | ab:keyword | na:integer | nb:integer @@ -136,7 +136,7 @@ foo | zoo | 1 | -1 intIntByKeywordKeywordMissing required_capability: lookup_v4 ROW aa="foo", ab="zoi" -| LOOKUP big ON aa, ab +| LOOKUP_🐔 big ON aa, ab ; aa:keyword | ab:keyword | na:integer | nb:integer @@ -146,7 +146,7 @@ foo | zoi | null | null intIntByKeywordKeywordSameValues required_capability: lookup_v4 ROW aa="foo", ab="foo" -| LOOKUP big ON aa, ab +| LOOKUP_🐔 big ON aa, ab ; aa:keyword | ab:keyword | na:integer | nb:integer @@ -156,7 +156,7 @@ foo | foo | 2 | -2 intIntByKeywordKeywordSameValuesMissing required_capability: lookup_v4 ROW aa="bar", ab="bar" -| LOOKUP big ON aa, ab +| LOOKUP_🐔 big ON aa, ab ; aa:keyword | ab:keyword | na:integer | nb:integer @@ -168,7 +168,7 @@ lookupBeforeStats-Ignore required_capability: lookup_v4 FROM employees | RENAME languages AS int -| LOOKUP int_number_names ON int +| LOOKUP_🐔 int_number_names ON int | RENAME name AS languages | STATS height=ROUND(AVG(height), 3) BY languages | SORT height ASC; @@ -178,7 +178,7 @@ height:double | languages:keyword 1.732 | one 1.762 | two 1.764 | three - 1.809 | null + 1.809 | null 
1.847 | five ; @@ -186,14 +186,14 @@ lookupAfterStats required_capability: lookup_v4 FROM employees | STATS int=TO_INT(AVG(height)) -| LOOKUP int_number_names ON int +| LOOKUP_🐔 int_number_names ON int | KEEP name; name:keyword two ; -// Makes sure the LOOKUP squashes previous names +// Makes sure the LOOKUP_🐔 squashes previous names doesNotDuplicateNames required_capability: lookup_v4 FROM employees @@ -201,7 +201,7 @@ FROM employees | LIMIT 4 | RENAME languages.long AS long | EVAL name = CONCAT(first_name, " ", last_name) -| LOOKUP long_number_names ON long +| LOOKUP_🐔 long_number_names ON long | RENAME long AS languages | KEEP emp_no, languages, name ; @@ -219,7 +219,7 @@ required_capability: lookup_v4 FROM employees | WHERE emp_no < 10005 | RENAME languages AS int -| LOOKUP int_number_names ON int +| LOOKUP_🐔 int_number_names ON int | RENAME name AS languages | KEEP languages, emp_no | SORT languages ASC, emp_no ASC @@ -238,7 +238,7 @@ FROM employees | WHERE emp_no < 10005 | SORT languages ASC, emp_no ASC | RENAME languages AS int -| LOOKUP int_number_names ON int +| LOOKUP_🐔 int_number_names ON int | RENAME name AS languages | KEEP languages, emp_no ; @@ -256,7 +256,7 @@ FROM employees | KEEP emp_no | WHERE emp_no == 10001 | EVAL left = "left", int = emp_no - 10000, name = "name", right = "right" -| LOOKUP int_number_names ON int +| LOOKUP_🐔 int_number_names ON int ; emp_no:integer | left:keyword | int:integer | right:keyword | name:keyword @@ -269,65 +269,57 @@ FROM employees | KEEP emp_no | WHERE emp_no == 10001 | EVAL left = "left", nb = -10011+emp_no, na = "na", middle = "middle", ab = "ab", aa = "bar", right = "right" -| LOOKUP big ON aa, nb +| LOOKUP_🐔 big ON aa, nb ; -emp_no:integer | left:keyword | nb:integer | middle:keyword | aa:keyword | right:keyword | ab:keyword | na:integer +emp_no:integer | left:keyword | nb:integer | middle:keyword | aa:keyword | right:keyword | ab:keyword | na:integer 10001 | left | -10 | middle | bar | right | zop | 10 ; // -// Make sure that the new LOOKUP syntax doesn't clash with any existing things -// named "lookup" +// Make sure that the new LOOKUP_🐔 syntax doesn't clash with any existing things +// named "lookup_🐔" // -rowNamedLookup -required_capability: lookup_v4 -ROW lookup = "a" -; - -lookup:keyword - a -; rowNamedLOOKUP required_capability: lookup_v4 -ROW LOOKUP = "a" +ROW lookup_🐔 = "a" ; -LOOKUP:keyword +lookup_🐔:keyword a ; evalNamedLookup required_capability: lookup_v4 -ROW a = "a" | EVAL lookup = CONCAT(a, "1") +ROW a = "a" | EVAL lookup_🐔 = CONCAT(a, "1") ; -a:keyword | lookup:keyword +a:keyword | lookup_🐔:keyword a | a1 ; dissectNamedLookup required_capability: lookup_v4 -row a = "foo bar" | dissect a "foo %{lookup}"; +row a = "foo bar" | dissect a "foo %{lookup_🐔}"; -a:keyword | lookup:keyword +a:keyword | lookup_🐔:keyword foo bar | bar ; renameIntoLookup required_capability: lookup_v4 -row a = "foo bar" | RENAME a AS lookup; +row a = "foo bar" | RENAME a AS lookup_🐔; -lookup:keyword +lookup_🐔:keyword foo bar ; sortOnLookup required_capability: lookup_v4 -ROW lookup = "a" | SORT lookup +ROW lookup_🐔 = "a" | SORT lookup_🐔 ; -lookup:keyword +lookup_🐔:keyword a ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-mv_sample_data.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-mv_sample_data.json new file mode 100644 index 0000000000000..838a8ba09b45a --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-mv_sample_data.json @@ -0,0 +1,16 @@ +{ + "properties": { + 
"@timestamp": { + "type": "date" + }, + "client_ip": { + "type": "ip" + }, + "event_duration": { + "type": "long" + }, + "message": { + "type": "keyword" + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-semantic_text.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-semantic_text.json index c587b69828170..db15133f036bb 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-semantic_text.json +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-semantic_text.json @@ -72,6 +72,10 @@ "st_base64": { "type": "semantic_text", "inference_id": "test_sparse_inference" + }, + "st_logs": { + "type": "semantic_text", + "inference_id": "test_sparse_inference" } } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec index da069836504d4..2fe2feb3bc219 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec @@ -214,8 +214,6 @@ height:double | s:double ; powSalarySquared -required_capability: pow_double - from employees | eval s = pow(to_long(salary) - 75000, 2) + 10000 | keep salary, s | sort salary desc | limit 4; salary:integer | s:double @@ -631,8 +629,6 @@ base:double | exponent:integer | result:double ; powIntInt -required_capability: pow_double - ROW base = 2, exponent = 2 | EVAL s = POW(base, exponent) ; @@ -642,8 +638,6 @@ base:integer | exponent:integer | s:double ; powIntIntPlusInt -required_capability: pow_double - row s = 1 + pow(2, 2); s:double @@ -658,8 +652,6 @@ s:double ; powIntUL -required_capability: pow_double - row x = pow(1, 9223372036854775808); x:double @@ -667,8 +659,6 @@ x:double ; powLongUL -required_capability: pow_double - row x = to_long(1) | eval x = pow(x, 9223372036854775808); x:double @@ -676,8 +666,6 @@ x:double ; powUnsignedLongUL -required_capability: pow_double - row x = to_ul(1) | eval x = pow(x, 9223372036854775808); x:double @@ -701,8 +689,6 @@ null ; powULInt -required_capability: pow_double - row x = pow(to_unsigned_long(9223372036854775807), 1); x:double @@ -710,8 +696,6 @@ x:double ; powULIntOverrun -required_capability: pow_double - ROW x = POW(9223372036854775808, 2) ; @@ -732,8 +716,6 @@ x:double ; powULLong -required_capability: pow_double - row x = to_long(10) | eval x = pow(to_unsigned_long(10), x); x:double @@ -741,8 +723,6 @@ x:double ; powULLongOverrun -required_capability: pow_double - row x = to_long(100) | eval x = pow(to_unsigned_long(10), x); x:double diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/metadata-remote.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/metadata-remote.csv-spec new file mode 100644 index 0000000000000..4d7ee9b1b5af6 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/metadata-remote.csv-spec @@ -0,0 +1,151 @@ +simpleKeep +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | sort _index desc, emp_no | limit 2 | keep emp_no, _index, _version; + +emp_no:integer |_index:keyword |_version:long +10001 |remote_cluster:employees |1 +10002 |remote_cluster:employees |1 +; + +aliasWithSameName +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | sort _index desc, emp_no | limit 2 | eval _index = _index, _version 
= _version | keep emp_no, _index, _version; + +emp_no:integer |_index:keyword |_version:long +10001 |remote_cluster:employees |1 +10002 |remote_cluster:employees |1 +; + +inComparison +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | sort emp_no | where _index == "remote_cluster:employees" | where _version == 1 | keep emp_no | limit 2; + +emp_no:integer +10001 +10002 +; + +metaIndexInAggs +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +FROM employees METADATA _index, _id +| STATS max = MAX(emp_no) BY _index | SORT _index; + +max:integer |_index:keyword +10100 |remote_cluster:employees +; + +metaIndexAliasedInAggs +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index | eval _i = _index | stats max = max(emp_no) by _i | SORT _i; + +max:integer |_i:keyword +10100 |remote_cluster:employees +; + +metaVersionInAggs +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _version | stats min = min(emp_no) by _version; + +min:integer |_version:long +10001 |1 +; + +metaVersionAliasedInAggs +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _version | eval _v = _version | stats min = min(emp_no) by _v; + +min:integer |_v:long +10001 |1 +; + +inAggsAndAsGroups +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | stats max = max(_version) by _index | SORT _index; + +max:long |_index:keyword +1 |remote_cluster:employees +; + +inAggsAndAsGroupsAliased +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | eval _i = _index, _v = _version | stats max = max(_v) by _i | SORT _i; + +max:long |_i:keyword +1 |remote_cluster:employees +; + +inFunction +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | sort emp_no | where length(_index) == length("remote_cluster:employees") | where abs(_version) == 1 | keep emp_no | limit 2; + +emp_no:integer +10001 +10002 +; + +inArithmetics +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | eval i = _version + 2 | stats min = min(emp_no) by i; + +min:integer |i:long +10001 |3 +; + +inSort +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | sort _version, _index desc, emp_no | keep emp_no, _version, _index | limit 2; + +emp_no:integer |_version:long |_index:keyword +10001 |1 |remote_cluster:employees +10002 |1 |remote_cluster:employees +; + +withMvFunction +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _version | eval i = mv_avg(_version) + 2 | stats min = min(emp_no) by i; + +min:integer |i:double +10001 |3.0 +; + +overwritten +required_capability: metadata_fields +required_capability: metadata_fields_remote_test +from employees metadata _index, _version | sort emp_no | eval _index = 3, _version = "version" | keep emp_no, _index, _version | limit 3; + +emp_no:integer |_index:integer |_version:keyword +10001 |3 |version +10002 |3 |version +10003 |3 |version +; + +multipleIndices +required_capability: metadata_fields 
+required_capability: metadata_fields_remote_test +FROM ul_logs, apps METADATA _index, _version +| WHERE id IN (13, 14) AND _version == 1 +| EVAL key = CONCAT(_index, "_", TO_STR(id)) +| SORT id, _index +| KEEP id, _index, _version, key +; + + id:long |_index:keyword |_version:long |key:keyword +13 |remote_cluster:apps |1 |remote_cluster:apps_13 +13 |remote_cluster:ul_logs |1 |remote_cluster:ul_logs_13 +14 |remote_cluster:apps |1 |remote_cluster:apps_14 +14 |remote_cluster:ul_logs |1 |remote_cluster:ul_logs_14 + +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_sample_data.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_sample_data.csv new file mode 100644 index 0000000000000..c02a4a7a5845f --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_sample_data.csv @@ -0,0 +1,8 @@ +@timestamp:date ,client_ip:ip,event_duration:long,message:keyword +2023-10-23T13:55:01.543Z,172.21.3.15 ,1756467,[Connected to 10.1.0.1, Banana] +2023-10-23T13:53:55.832Z,172.21.3.15 ,5033755,[Connection error, Banana] +2023-10-23T13:52:55.015Z,172.21.3.15 ,8268153,[Connection error, Banana] +2023-10-23T13:51:54.732Z,172.21.3.15 , 725448,[Connection error, Banana] +2023-10-23T13:33:34.937Z,172.21.0.5 ,1232382,[Disconnected, Banana] +2023-10-23T12:27:28.948Z,172.21.2.113,2764889,[Connected to 10.1.0.2, Banana] +2023-10-23T12:15:03.360Z,172.21.2.162,3450233,[Connected to 10.1.0.3, Banana] diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/qstr-function.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/qstr-function.csv-spec index 3e92e55928d64..2c84bdae6b32e 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/qstr-function.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/qstr-function.csv-spec @@ -100,9 +100,8 @@ book_no:keyword | title:text 7140 | The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 
1) ; - -matchMultivaluedTextField -required_capability: match_function +qstrWithMultivaluedTextField +required_capability: qstr_function from employees | where qstr("job_positions: (Tech Lead) AND job_positions:(Reporting Analyst)") @@ -118,8 +117,8 @@ emp_no:integer | first_name:keyword | last_name:keyword 10097 | Remzi | Waschkowski ; -matchMultivaluedNumericField -required_capability: match_function +qstrWithMultivaluedNumericField +required_capability: qstr_function from employees | where qstr("salary_change: [14 TO *]") @@ -137,7 +136,7 @@ emp_no:integer | first_name:keyword | last_name:keyword | salary_change:double ; testMultiValuedFieldWithConjunction -required_capability: match_function +required_capability: qstr_function from employees | where (qstr("job_positions: (Data Scientist) OR job_positions:(Support Engineer)")) and gender == "F" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec new file mode 100644 index 0000000000000..d4c7b8c59fdbc --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/scoring.csv-spec @@ -0,0 +1,285 @@ +############################################### +# Tests for scoring support +# + +singleQstrBoostScoringSorted +required_capability: metadata_score +required_capability: qstr_function + +from books metadata _score +| where qstr("author:Lord Rings^2") +| eval c_score = ceil(_score) +| keep book_no, title, c_score +| sort c_score desc, book_no asc +| LIMIT 2; + +book_no:keyword | title:text | c_score:double +2675 | The Lord of the Rings - Boxed Set | 6.0 +4023 | A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings | 6.0 +; + +singleMatchWithKeywordFieldScoring +required_capability: metadata_score +required_capability: match_operator_colon + +from books metadata _score +| where author.keyword:"William Faulkner" +| keep book_no, author, _score +| sort book_no; + +book_no:keyword | author:text | _score:double +2713 | William Faulkner | 2.3142893314361572 +2883 | William Faulkner | 2.3142893314361572 +4724 | William Faulkner | 2.3142893314361572 +4977 | William Faulkner | 2.3142893314361572 +5119 | William Faulkner | 2.3142893314361572 +5404 | William Faulkner | 2.3142893314361572 +5578 | William Faulkner | 2.3142893314361572 +8077 | William Faulkner | 2.3142893314361572 +9896 | William Faulkner | 2.3142893314361572 +; + +qstrWithFieldAndScoringSortedEval +required_capability: qstr_function +required_capability: metadata_score + +from books metadata _score +| where qstr("title:rings") +| sort _score desc +| eval _score::long +| keep book_no, title, _score +| limit 3; + +book_no:keyword | title:text | _score:double +2675 | The Lord of the Rings - Boxed Set | 2.7583377361297607 +7140 | The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1) | 1.9239964485168457 +2714 | Return of the King Being the Third Part of The Lord of the Rings | 1.9239964485168457 +; + +qstrWithFieldAndScoringSorted +required_capability: qstr_function +required_capability: metadata_score + +from books metadata _score +| where qstr("title:rings") +| sort _score desc, book_no desc +| keep book_no, title, _score +| limit 3; + +book_no:keyword | title:text | _score:double +2675 | The Lord of the Rings - Boxed Set | 2.7583377361297607 +7140 | The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 
1) | 1.9239964485168457 +2714 | Return of the King Being the Third Part of The Lord of the Rings | 1.9239964485168457 +; + +singleQstrScoringManipulated +required_capability: metadata_score +required_capability: qstr_function + +from books metadata _score +| where qstr("author:William Faulkner") +| eval add_score = ceil(_score) + 1 +| keep book_no, author, add_score +| sort book_no +| LIMIT 2; + +book_no:keyword | author:text | add_score:double +2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] | 2.0 +2713 | William Faulkner | 7.0 +; + +testMultiValuedFieldWithConjunctionWithScore +required_capability: match_function +required_capability: metadata_score + +from employees metadata _score +| where match(job_positions, "Data Scientist") and match(job_positions, "Support Engineer") +| keep emp_no, first_name, last_name, job_positions, _score; + +emp_no:integer | first_name:keyword | last_name:keyword | job_positions:keyword | _score:double +10043 | Yishay | Tzvieli | [Data Scientist, Python Developer, Support Engineer] | 5.233309745788574 +; + +testMatchAndQueryStringFunctionsWithScore +required_capability: match_function +required_capability: metadata_score + +from employees metadata _score +| where match(job_positions, "Data Scientist") and qstr("job_positions: (Support Engineer) and gender: F") +| keep emp_no, first_name, last_name, job_positions, _score; +ignoreOrder:true + +emp_no:integer | first_name:keyword | last_name:keyword | job_positions:keyword | _score:double +10041 | Uri | Lenart | [Data Scientist, Head Human Resources, Internship, Senior Team Lead] | 3.509873867034912 +10043 | Yishay | Tzvieli | [Data Scientist, Python Developer, Support Engineer] | 5.233309745788574 +; + +multipleWhereWithMatchScoringNoSort +required_capability: metadata_score +required_capability: match_operator_colon + +from books metadata _score +| where title:"short stories" +| where author:"Ursula K. Le Guin" +| keep book_no, title, author, _score; + +ignoreOrder:true +book_no:keyword | title:text | author:text | _score:double +8480 | The wind's twelve quarters: Short stories | Ursula K. Le Guin | 14.489097595214844 +; + +multipleWhereWithMatchScoring +required_capability: metadata_score +required_capability: match_operator_colon + +from books metadata _score +| where title:"short stories" +| where author:"Ursula K. Le Guin" +| keep book_no, title, author, _score +| sort book_no; + +book_no:keyword | title:text | author:text | _score:double +8480 | The wind's twelve quarters: Short stories | Ursula K. Le Guin | 14.489097595214844 +; + +combinedMatchWithFunctionsScoring +required_capability: metadata_score +required_capability: match_operator_colon + +from books metadata _score +| where title:"Tolkien" AND author:"Tolkien" AND year > 2000 +| where mv_count(author) == 1 +| keep book_no, title, author, year, _score +| sort book_no; + +book_no:keyword | title:text | author:text | year:integer | _score:double +5335 | Letters of J R R Tolkien | J.R.R. 
Tolkien | 2014 | 5.448054313659668 +; + +singleQstrScoring +required_capability: metadata_score +required_capability: qstr_function + +from books metadata _score +| where qstr("author:William Faulkner") +| keep book_no, author, _score +| sort book_no +| LIMIT 2; + +book_no:keyword | author:text | _score:double +2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] | 0.9976131916046143 +2713 | William Faulkner | 5.9556169509887695 +; + +singleQstrScoringGrok +required_capability: metadata_score +required_capability: qstr_function + +from books metadata _score +| where qstr("author:Lord Rings") +| GROK title "%{WORD:title} %{WORD}" +| sort _score desc +| keep book_no, title, _score +| LIMIT 3; + +book_no:keyword | title:keyword | _score:double +8875 | The | 2.9505908489227295 +4023 | A | 2.8327860832214355 +2675 | The | 2.7583377361297607 +; + +combinedMatchWithScoringEvalNoSort +required_capability: metadata_score +required_capability: match_operator_colon + +from books metadata _score +| where title:"Tolkien" AND author:"Tolkien" AND year > 2000 +| where mv_count(author) == 1 +| eval c_score = ceil(_score) +| keep book_no, title, author, year, c_score; + +ignoreOrder:true +book_no:keyword | title:text | author:text | year:integer | c_score:double +5335 | Letters of J R R Tolkien | J.R.R. Tolkien | 2014 | 6 +; + +singleQstrScoringRename +required_capability: metadata_score +required_capability: qstr_function + +from books metadata _score +| where qstr("author:Lord Rings") +| rename _score as rank +| sort rank desc +| keep book_no, rank +| LIMIT 3; + +book_no:keyword | rank:double +8875 | 2.9505908489227295 +4023 | 2.8327860832214355 +2675 | 2.7583377361297607 +; + +singleMatchWithTextFieldScoring +required_capability: metadata_score +required_capability: match_operator_colon + +from books metadata _score +| where author:"William Faulkner" +| sort book_no +| keep book_no, author, _score +| limit 5; + +book_no:keyword | author:text | _score:double +2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] | 0.9976131916046143 +2713 | William Faulkner | 4.272439002990723 +2847 | Colleen Faulkner | 1.7401835918426514 +2883 | William Faulkner | 4.272439002990723 +3293 | Danny Faulkner | 1.7401835918426514 +; + +combinedMatchWithFunctionsScoringNoSort +required_capability: metadata_score +required_capability: match_operator_colon + +from books metadata _score +| where title:"Tolkien" AND author:"Tolkien" AND year > 2000 +| where mv_count(author) == 1 +| keep book_no, title, author, year, _score; + +ignoreOrder:true +book_no:keyword | title:text | author:text | year:integer | _score:double +5335 | Letters of J R R Tolkien | J.R.R. Tolkien | 2014 | 5.448054313659668 +; + +combinedMatchWithScoringEval +required_capability: metadata_score +required_capability: match_operator_colon + +from books metadata _score +| where title:"Tolkien" AND author:"Tolkien" AND year > 2000 +| where mv_count(author) == 1 +| eval c_score = ceil(_score) +| keep book_no, title, author, year, c_score +| sort book_no; + +book_no:keyword | title:text | author:text | year:integer | c_score:double +5335 | Letters of J R R Tolkien | J.R.R. 
Tolkien | 2014 | 6 +; + +singleQstrScoringEval +required_capability: metadata_score +required_capability: qstr_function + +from books metadata _score +| where qstr("author:Lord Rings") +| eval c_score = ceil(_score) +| keep book_no, c_score +| sort book_no desc +| LIMIT 3; + +book_no:keyword | c_score:double +8875 | 3.0 +7350 | 2.0 +7140 | 3.0 +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv index 6cae82cfefa0a..bd5fe7fad3a4e 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv @@ -1,4 +1,4 @@ -_id:keyword,semantic_text_field:semantic_text,st_bool:semantic_text,st_cartesian_point:semantic_text,st_cartesian_shape:semantic_text,st_datetime:semantic_text,st_double:semantic_text,st_geopoint:semantic_text,st_geoshape:semantic_text,st_integer:semantic_text,st_ip:semantic_text,st_long:semantic_text,st_unsigned_long:semantic_text,st_version:semantic_text,st_multi_value:semantic_text,st_unicode:semantic_text,host:keyword,description:text,value:long,st_base64:semantic_text -1,live long and prosper,false,"POINT(4297.11 -1475.53)",,1953-09-02T00:00:00.000Z,5.20128E11,"POINT(42.97109630194 14.7552534413725)","POLYGON ((30 10\, 40 40\, 20 40\, 10 20\, 30 10))",23,1.1.1.1,2147483648,2147483648,1.2.3,["Hello there!", "This is a random value", "for testing purposes"],你吃饭了吗,"host1","some description1",1001,ZWxhc3RpYw== -2,all we have to decide is what to do with the time that is given to us,true,"POINT(7580.93 2272.77)",,2023-09-24T15:57:00.000Z,4541.11,"POINT(37.97109630194 21.7552534413725)","POLYGON ((30 10\, 40 40\, 20 40\, 10 20\, 30 10))",122,1.1.2.1,123,2147483648.2,9.0.0,["nice to meet you", "bye bye!"],["谢谢", "对不起我的中文不好"],"host2","some description2",1002,aGVsbG8= -3,be excellent to each other,,,,,,,,,,,,,,,"host3","some description3",1003, +_id:keyword,semantic_text_field:semantic_text,st_bool:semantic_text,st_cartesian_point:semantic_text,st_cartesian_shape:semantic_text,st_datetime:semantic_text,st_double:semantic_text,st_geopoint:semantic_text,st_geoshape:semantic_text,st_integer:semantic_text,st_ip:semantic_text,st_long:semantic_text,st_unsigned_long:semantic_text,st_version:semantic_text,st_multi_value:semantic_text,st_unicode:semantic_text,host:keyword,description:text,value:long,st_base64:semantic_text,st_logs:semantic_text +1,live long and prosper,false,"POINT(4297.11 -1475.53)",,1953-09-02T00:00:00.000Z,5.20128E11,"POINT(42.97109630194 14.7552534413725)","POLYGON ((30 10\, 40 40\, 20 40\, 10 20\, 30 10))",23,1.1.1.1,2147483648,2147483648,1.2.3,["Hello there!", "This is a random value", "for testing purposes"],你吃饭了吗,"host1","some description1",1001,ZWxhc3RpYw==,"2024-12-23T12:15:00.000Z 1.2.3.4 example@example.com 4553" +2,all we have to decide is what to do with the time that is given to us,true,"POINT(7580.93 2272.77)",,2023-09-24T15:57:00.000Z,4541.11,"POINT(37.97109630194 21.7552534413725)","POLYGON ((30 10\, 40 40\, 20 40\, 10 20\, 30 10))",122,1.1.2.1,123,2147483648.2,9.0.0,["nice to meet you", "bye bye!"],["谢谢", "对不起我的中文不好"],"host2","some description2",1002,aGVsbG8=,"2024-01-23T12:15:00.000Z 1.2.3.4 foo@example.com 42" +3,be excellent to each other,,,,,,,,,,,,,,,"host3","some description3",1003,,"2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv-spec 
b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv-spec index de2a79df06a50..43dc6e4d4acd2 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/semantic_text.csv-spec @@ -88,19 +88,75 @@ _id:keyword | my_field:semantic_text 3 | be excellent to each other ; -simpleStats -required_capability: semantic_text_type +statsWithCount +required_capability: semantic_text_aggregations + +FROM semantic_text METADATA _id +| STATS result = COUNT(st_version) +; + +result:long +2 +; + +statsWithCountDistinct +required_capability: semantic_text_aggregations + +FROM semantic_text METADATA _id +| STATS result = COUNT_DISTINCT(st_version) +; + +result:long +2 +; + +statsWithValues +required_capability: semantic_text_aggregations + +FROM semantic_text METADATA _id +| STATS result = VALUES(st_version) +| EVAL result = MV_SORT(result) +; + +result:keyword +["1.2.3", "9.0.0"] +; + +statsWithMin +required_capability: semantic_text_aggregations + +FROM semantic_text METADATA _id +| STATS result = min(st_version) +; + +result:keyword +1.2.3 +; + +statsWithMax +required_capability: semantic_text_aggregations FROM semantic_text METADATA _id -| STATS COUNT(*) +| STATS result = max(st_version) ; -COUNT(*):long -3 +result:keyword +9.0.0 +; + +statsWithTop +required_capability: semantic_text_aggregations + +FROM semantic_text METADATA _id +| STATS result = top(st_version, 2, "asc") +; + +result:keyword +["1.2.3", "9.0.0"] ; statsWithGrouping -required_capability: semantic_text_type +required_capability: semantic_text_aggregations FROM semantic_text METADATA _id | STATS COUNT(*) BY st_version @@ -132,6 +188,36 @@ COUNT(*):long | my_field:semantic_text 1 | bye bye! ; +grok +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| GROK st_logs """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}""" +| KEEP st_logs, date, ip, email, num +| SORT st_logs +; + +st_logs:semantic_text | date:keyword | ip:keyword | email:keyword | num:keyword +2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42 | 2023-01-23T12:15:00.000Z | 127.0.0.1 | some.email@foo.com | 42 +2024-01-23T12:15:00.000Z 1.2.3.4 foo@example.com 42 | 2024-01-23T12:15:00.000Z | 1.2.3.4 | foo@example.com | 42 +2024-12-23T12:15:00.000Z 1.2.3.4 example@example.com 4553 | 2024-12-23T12:15:00.000Z | 1.2.3.4 | example@example.com | 4553 +; + +dissect +required_capability: semantic_text_type + +FROM semantic_text METADATA _id +| DISSECT st_logs """%{date} %{ip} %{email} %{num}""" +| KEEP st_logs, date, ip, email, num +| SORT st_logs +; + +st_logs:semantic_text | date:keyword | ip:keyword | email:keyword | num:keyword +2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42 | 2023-01-23T12:15:00.000Z | 127.0.0.1 | some.email@foo.com | 42 +2024-01-23T12:15:00.000Z 1.2.3.4 foo@example.com 42 | 2024-01-23T12:15:00.000Z | 1.2.3.4 | foo@example.com | 42 +2024-12-23T12:15:00.000Z 1.2.3.4 example@example.com 4553 | 2024-12-23T12:15:00.000Z | 1.2.3.4 | example@example.com | 4553 +; + simpleWithLongValue required_capability: semantic_text_type diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec index 01e7258e8a6ee..ac9948c90f5e9 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec @@ -3,7 +3,6 @@ 
############################################### convertFromStringQuantize -required_capability: spatial_points row wkt = "POINT(42.97109629958868 14.7552534006536)" | eval pt = to_geopoint(wkt); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 96aa779ad38c3..d76f4c05d955f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -2348,6 +2348,42 @@ v:integer | job_positions:keyword 10094 | Accountant ; +docsStatsWithSimpleFiltering +required_capability: per_agg_filtering +// tag::aggFiltering[] +FROM employees +| STATS avg50s = AVG(salary)::LONG WHERE birth_date < "1960-01-01", + avg60s = AVG(salary)::LONG WHERE birth_date >= "1960-01-01" + BY gender +| SORT gender +// end::aggFiltering[] +| WHERE gender IS NOT NULL +; + +// tag::aggFiltering-result[] +avg50s:long |avg60s:long |gender:keyword +55462 |46637 |F +48279 |44879 |M +// end::aggFiltering-result[] +; + +docsStatsWithFilteringNoGroups +required_capability: per_agg_filtering +// tag::aggFilteringNoGroup[] +FROM employees +| EVAL Ks = salary / 1000 // thousands +| STATS under_40K = COUNT(*) WHERE Ks < 40, + inbetween = COUNT(*) WHERE 40 <= Ks AND Ks < 60, + over_60K = COUNT(*) WHERE 60 <= Ks, + total = COUNT(*) +// end::aggFilteringNoGroup[] +; + +// tag::aggFilteringNoGroup-result[] +under_40K:long |inbetween:long |over_60K:long |total:long +36 |39 |25 |100 +// end::aggFilteringNoGroup-result[] +; statsWithFiltering required_capability: per_agg_filtering @@ -2468,6 +2504,7 @@ count:long |values:keyword |job_positions:keyword ; prunedStatsFollowedByStats +required_capability: per_agg_filtering from employees | eval my_length = length(concat(first_name, null)) | stats count = count(my_length) where false, @@ -2641,6 +2678,108 @@ c2:l |c2_f:l |m2:i |m2_f:i |c:l 1 |1 |5 |5 |21 ; +simpleCountOnFieldWithFilteringAndNoGrouping +required_capability: per_agg_filtering +from employees +| stats c1 = count(emp_no) where emp_no < 10042 +; + +c1:long +41 +; + +simpleCountOnFieldWithFilteringOnDifferentFieldAndNoGrouping +required_capability: per_agg_filtering +from employees +| stats c1 = count(hire_date) where emp_no < 10042 +; + +c1:long +41 +; + +simpleCountOnStarWithFilteringAndNoGrouping +required_capability: per_agg_filtering +from employees +| stats c1 = count(*) where emp_no < 10042 +; + +c1:long +41 +; + +simpleCountWithFilteringAndNoGroupingOnFieldWithNulls +required_capability: per_agg_filtering +from employees +| stats c1 = count(birth_date) where emp_no <= 10050 +; + +c1:long +40 +; + + +simpleCountWithFilteringAndNoGroupingOnFieldWithMultivalues +required_capability: per_agg_filtering +from employees +| stats c1 = count(job_positions) where emp_no <= 10003 +; + +c1:long +3 +; + +commonFilterExtractionWithAliasing +required_capability: per_agg_filtering +from employees +| eval eno = emp_no +| drop emp_no +| stats min_sal = min(salary) where eno <= 10010, + min_hei = min(height) where eno <= 10010 +; + +min_sal:integer |min_hei:double +36174 |1.56 +; + +commonFilterExtractionWithAliasAndOriginal +required_capability: per_agg_filtering +from employees +| eval eno = emp_no +| stats min_sal = min(salary) where eno <= 10010, + min_hei = min(height) where emp_no <= 10010 +; + +// same results as above in commonFilterExtractionWithAliasing +min_sal:integer |min_hei:double +36174 |1.56 +; + 
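(Illustrative, not part of the patch: the commonFilterExtraction* specs on either side of this point verify that per-aggregate filters written against an alias and against the original field are recognized as the same predicate — including after normalizing forms like `10010 >= emp_no` and `NOT (emp_no > 10010)` — so they yield identical results. The shape being tested:)

FROM employees
| EVAL eno = emp_no
| STATS min_sal = MIN(salary) WHERE eno <= 10010,
        min_hei = MIN(height) WHERE emp_no <= 10010
// Both filters resolve to emp_no <= 10010, as the matching expected tables assert.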
+commonFilterExtractionWithAliasAndOriginalNeedingNormalization +required_capability: per_agg_filtering +from employees +| eval eno = emp_no +| stats min_sal = min(salary) where eno <= 10010, + min_hei = min(height) where emp_no <= 10010, + max_hei = max(height) where 10010 >= emp_no +; + +min_sal:integer |min_hei:double |max_hei:double +36174 |1.56 |2.1 +; + +commonFilterExtractionWithAliasAndOriginalNeedingNormalizationAndSimplification +required_capability: per_agg_filtering +from employees +| eval eno = emp_no +| stats min_sal = min(salary) where eno <= 10010, + min_hei = min(height) where not (emp_no > 10010), + max_hei = max(height) where 10010 >= emp_no +; + +min_sal:integer |min_hei:double |max_hei:double +36174 |1.56 |2.1 +; statsByConstantExpressionNoAggs required_capability: fix_stats_by_foldable_expression @@ -2670,6 +2809,18 @@ m:integer | y+1:integer 11 | 12 ; +statsByConstantExpressionWithAliasAndSort +required_capability: fix_stats_by_foldable_expression_2 +FROM employees +| EVAL y = "a" +| STATS count = COUNT() BY x = y +| SORT x +; + +count:long | x:keyword +100 | a +; + filterIsAlwaysTrue required_capability: per_agg_filtering FROM employees @@ -2795,3 +2946,143 @@ max:integer | job_positions:keyword 39878 | Business Analyst 67492 | Data Scientist ; + +stdDeviation +required_capability: std_dev +// tag::stdev[] +FROM employees +| STATS STD_DEV(height) +// end::stdev[] +; + +// tag::stdev-result[] +STD_DEV(height):double +0.20637044362020449 +// end::stdev-result[] +; + +stdDeviationNested +required_capability: std_dev +// tag::docsStatsStdDevNestedExpression[] +FROM employees +| STATS stddev_salary_change = STD_DEV(MV_MAX(salary_change)) +// end::docsStatsStdDevNestedExpression[] +; + +// tag::docsStatsStdDevNestedExpression-result[] +stddev_salary_change:double +6.875829592924112 +// end::docsStatsStdDevNestedExpression-result[] +; + + +stdDeviationWithLongs +required_capability: std_dev +FROM employees +| STATS STD_DEV(avg_worked_seconds) +; + +STD_DEV(avg_worked_seconds):double +5.76010425971634E7 +; + +stdDeviationWithInts +required_capability: std_dev +FROM employees +| STATS STD_DEV(salary) +; + +STD_DEV(salary):double +13765.12550278783 +; + +stdDeviationConstantValue +required_capability: std_dev +FROM employees +| WHERE languages == 2 +| STATS STD_DEV(languages) +; + +STD_DEV(languages):double +0.0 +; + +stdDeviationGroupedDoublesOnly +required_capability: std_dev +FROM employees +| STATS STD_DEV(height) BY languages +| SORT languages asc +; + +STD_DEV(height):double | languages:integer +0.22106409327010415 | 1 +0.22797190865484734 | 2 +0.18893070075713295 | 3 +0.14656141004227627 | 4 +0.17733860152780256 | 5 +0.2486543786061287 | null +; + +stdDeviationGroupedAllTypes +required_capability: std_dev +FROM employees +| WHERE languages < 3 +| STATS + double_std_dev = STD_DEV(height), + int_std_dev = STD_DEV(salary), + long_std_dev = STD_DEV(avg_worked_seconds) + BY languages +| SORT languages asc +; + +double_std_dev:double | int_std_dev:double | long_std_dev:double | languages:integer +0.22106409327010415 | 15166.244178730898 | 5.1998715922156096E7 | 1 +0.22797190865484734 | 12139.61099378116 | 5.309085506583288E7 | 2 +; + +stdDeviationNoRows +required_capability: std_dev +FROM employees +| WHERE languages IS null +| STATS STD_DEV(languages) +; + +STD_DEV(languages):double +null +; + +stdDevMultiValue +required_capability: std_dev +FROM employees +| STATS STD_DEV(salary_change) +; + +STD_DEV(salary_change):double +7.062226788733394 +; + +stdDevFilter 
+required_capability: std_dev +FROM employees +| STATS greater_than = STD_DEV(salary_change) WHERE languages > 3 +, less_than = STD_DEV(salary_change) WHERE languages <= 3 +, salary = STD_DEV(salary * 2) +, count = COUNT(*) BY gender +| SORT gender asc +; + +greater_than:double | less_than:double | salary:double | count:long | gender:keyword +6.4543266953142835 | 7.57786788789264 | 29045.770666969744 | 33 | F +6.975232333891946 | 6.604807075547775 | 26171.331109641273 | 57 | M +6.949207097931448 | 7.127229475750027 | 27921.220736207077 | 10 | null +; + +stdDevRow +required_capability: std_dev +ROW a = [1,2,3], b = 5 +| STATS STD_DEV(a), STD_DEV(b) +; + +STD_DEV(a):double | STD_DEV(b):double +0.816496580927726 | 0.0 +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index 963245f9f0ea6..e103168d2e589 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -390,7 +390,6 @@ emp_no:integer | name:keyword // Note: no matches in MV returned in -required_capability: mv_warn from employees | where job_positions in ("Internship", first_name) | keep emp_no, job_positions; ignoreOrder:true @@ -582,7 +581,6 @@ emp_no:integer |positions:keyword ; lessThanMultivalue -required_capability: mv_warn from employees | where job_positions < "C" | keep emp_no, job_positions | sort emp_no; warningRegex:evaluation of \[job_positions < \\\"C\\\"\] failed, treating result as null. Only first 20 failures recorded. @@ -595,7 +593,6 @@ emp_no:integer |job_positions:keyword ; greaterThanMultivalue -required_capability: mv_warn from employees | where job_positions > "C" | keep emp_no, job_positions | sort emp_no | limit 6; warningRegex:evaluation of \[job_positions > \\\"C\\\"\] failed, treating result as null. Only first 20 failures recorded. @@ -612,7 +609,6 @@ emp_no:integer |job_positions:keyword ; equalToMultivalue -required_capability: mv_warn from employees | where job_positions == "Accountant" | keep emp_no, job_positions | sort emp_no; warningRegex:evaluation of \[job_positions == \\\"Accountant\\\"\] failed, treating result as null. Only first 20 failures recorded. @@ -624,7 +620,6 @@ emp_no:integer |job_positions:keyword ; equalToOrEqualToMultivalue -required_capability: mv_warn from employees | where job_positions == "Accountant" or job_positions == "Tech Lead" | keep emp_no, job_positions | sort emp_no; warningRegex:evaluation of \[job_positions\] failed, treating result as null. Only first 20 failures recorded. @@ -637,7 +632,6 @@ emp_no:integer |job_positions:keyword ; inMultivalue -required_capability: mv_warn from employees | where job_positions in ("Accountant", "Tech Lead") | keep emp_no, job_positions | sort emp_no; warningRegex:evaluation of \[job_positions in \(\\\"Accountant\\\", \\"Tech Lead\\\"\)\] failed, treating result as null. Only first 20 failures recorded. @@ -650,7 +644,6 @@ emp_no:integer |job_positions:keyword ; notLessThanMultivalue -required_capability: mv_warn from employees | where not(job_positions < "C") | keep emp_no, job_positions | sort emp_no | limit 6; warningRegex:evaluation of \[.*job_positions < \\\"C\\\".*\] failed, treating result as null. Only first 20 failures recorded. 
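(Illustrative, not part of the patch: the stdDevRow spec earlier in stats.csv-spec makes STD_DEV's population semantics checkable by hand. For a = [1, 2, 3]: mean = 2, squared deviations = 1, 0, 1, population variance = (1 + 0 + 1) / 3 = 2/3, so STD_DEV(a) = sqrt(2/3) ≈ 0.816496580927726 — exactly the expected value — and a constant column such as b = 5 gives 0.0. A sample (n-1) estimator would instead have returned 1.0 for a.)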
@@ -667,7 +660,6 @@ emp_no:integer |job_positions:keyword ; notGreaterThanMultivalue -required_capability: mv_warn from employees | where not(job_positions > "C") | keep emp_no, job_positions | sort emp_no | limit 6; warningRegex:evaluation of \[.*job_positions > \\\"C\\\".*\] failed, treating result as null. Only first 20 failures recorded. @@ -680,7 +672,6 @@ emp_no:integer |job_positions:keyword ; notEqualToMultivalue -required_capability: mv_warn from employees | where not(job_positions == "Accountant") | keep emp_no, job_positions | sort emp_no | limit 6; warningRegex:evaluation of \[.*job_positions == \\\"Accountant\\\".*\] failed, treating result as null. Only first 20 failures recorded. @@ -937,7 +928,6 @@ beta | Kubernetes cluster | [beta k8s server, beta k8s server2 ; lengthOfText -required_capability: mv_warn from hosts | where host=="epsilon" | eval l1 = length(host_group), l2 = length(description) | keep l1, l2; ignoreOrder:true @@ -951,7 +941,6 @@ null | 19 ; startsWithText -required_capability: mv_warn from hosts | where host=="epsilon" | eval l1 = starts_with(host_group, host), l2 = starts_with(description, host) | keep l1, l2; ignoreOrder:true @@ -965,7 +954,6 @@ false | null ; substringOfText -required_capability: mv_warn from hosts | where host=="epsilon" | eval l1 = substring(host_group, 0, 5), l2 = substring(description, 0, 5) | keep l1, l2; ignoreOrder:true @@ -979,7 +967,6 @@ Gatew | null ; concatOfText -required_capability: mv_warn from hosts | where host == "epsilon" | eval l1 = concat(host, "/", host_group), l2 = concat(host_group, "/", description) | sort l1 | keep l1, l2; warning:Line 1:86: evaluation of [concat(host_group, \"/\", description)] failed, treating result as null. Only first 20 failures recorded. @@ -1518,7 +1505,6 @@ min(f_l):integer | max(f_l):integer | job_positions:keyword ; locateWarnings#[skip:-8.13.99,reason:new string function added in 8.14] -required_capability: mv_warn from hosts | where host=="epsilon" | eval l1 = locate(host_group, "ate"), l2 = locate(description, "ate") | keep l1, l2; ignoreOrder:true diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec index a2fd3f3d5e0da..bf6e2f8ae0893 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec @@ -626,6 +626,65 @@ sample_data_ts_nanos | 2023-10-23T12:27:28.948123456Z | 172.21.2.113 | 27648 sample_data_ts_nanos | 2023-10-23T12:15:03.360123456Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 ; +multiIndex sort millis and nanos as nanos +required_capability: to_date_nanos +required_capability: union_types +required_capability: metadata_fields +required_capability: union_types_remove_fields + +FROM sample_data, sample_data_ts_nanos METADATA _index +| EVAL ts = TO_DATE_NANOS(@timestamp) +| KEEP _index, ts, client_ip, event_duration, message +| SORT ts DESC +; + +_index:keyword | ts:date_nanos | client_ip:ip | event_duration:long | message:keyword +sample_data_ts_nanos | 2023-10-23T13:55:01.543123456Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data | 2023-10-23T13:55:01.543000000Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_nanos | 2023-10-23T13:53:55.832123456Z | 172.21.3.15 | 5033755 | Connection error +sample_data | 2023-10-23T13:53:55.832000000Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_nanos | 
2023-10-23T13:52:55.015123456Z | 172.21.3.15 | 8268153 | Connection error +sample_data | 2023-10-23T13:52:55.015000000Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_nanos | 2023-10-23T13:51:54.732123456Z | 172.21.3.15 | 725448 | Connection error +sample_data | 2023-10-23T13:51:54.732000000Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_nanos | 2023-10-23T13:33:34.937123456Z | 172.21.0.5 | 1232382 | Disconnected +sample_data | 2023-10-23T13:33:34.937000000Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_nanos | 2023-10-23T12:27:28.948123456Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data | 2023-10-23T12:27:28.948000000Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_nanos | 2023-10-23T12:15:03.360123456Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data | 2023-10-23T12:15:03.360000000Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +; + +multiIndex sort millis and nanos as millis +required_capability: to_date_nanos +required_capability: union_types +required_capability: metadata_fields +required_capability: union_types_remove_fields + +FROM sample_data, sample_data_ts_nanos METADATA _index +| EVAL ts = TO_DATETIME(@timestamp) +| KEEP _index, ts, client_ip, event_duration, message +| SORT ts DESC, _index DESC +; + +_index:keyword | ts:datetime | client_ip:ip | event_duration:long | message:keyword +sample_data_ts_nanos | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_nanos | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_nanos | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_nanos | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_nanos | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_nanos | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_nanos | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +; + + multiIndexTsNanosRenameToNanosWithFiltering required_capability: to_date_nanos required_capability: date_nanos_binary_comparison @@ -718,6 +777,7 @@ count:long | @timestamp:date multiIndexTsNanosToDatetimeStats required_capability: union_types required_capability: union_types_remove_fields +required_capability: to_date_nanos FROM sample_data, sample_data_ts_nanos | EVAL @timestamp = DATE_TRUNC(1 hour, TO_DATETIME(@timestamp)) @@ -734,6 +794,8 @@ count:long | @timestamp:date Multi Index millis to nanos stats required_capability: union_types required_capability: union_types_remove_fields +required_capability: to_date_nanos +required_capability: date_trunc_date_nanos FROM sample_data, sample_data_ts_nanos | EVAL @timestamp = DATE_TRUNC(1 hour, TO_DATE_NANOS(@timestamp)) @@ -752,6 +814,7 @@ multiIndexTsLongStatsDrop required_capability: union_types required_capability: union_types_agg_cast 
required_capability: casting_operator +required_capability: to_date_nanos FROM sample_data, sample_data_ts_long, sample_data_ts_nanos | STATS count=count(*) BY @timestamp::datetime @@ -772,6 +835,7 @@ multiIndexTsLongStatsInline2 required_capability: union_types required_capability: union_types_agg_cast required_capability: casting_operator +required_capability: to_date_nanos FROM sample_data, sample_data_ts_long, sample_data_ts_nanos | STATS count=count(*) BY @timestamp::datetime @@ -915,6 +979,7 @@ multiIndexIpStringTsLong required_capability: union_types required_capability: metadata_fields required_capability: union_types_remove_fields +required_capability: to_date_nanos FROM sample_data* METADATA _index | EVAL @timestamp = TO_DATETIME(@timestamp), client_ip = TO_IP(client_ip) @@ -956,6 +1021,7 @@ sample_data_ts_nanos | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 multiIndexIpStringTsLongDropped required_capability: union_types required_capability: metadata_fields +required_capability: to_date_nanos FROM sample_data* METADATA _index | EVAL @timestamp = TO_DATETIME(@timestamp), client_ip = TO_IP(client_ip) @@ -998,6 +1064,7 @@ multiIndexIpStringTsLongRename required_capability: union_types required_capability: metadata_fields required_capability: union_types_remove_fields +required_capability: to_date_nanos FROM sample_data* METADATA _index | EVAL ts = TO_DATETIME(@timestamp), host_ip = TO_IP(client_ip) @@ -1039,6 +1106,7 @@ sample_data_ts_nanos | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 multiIndexIpStringTsLongRenameDropped required_capability: union_types required_capability: metadata_fields +required_capability: to_date_nanos FROM sample_data* METADATA _index | EVAL ts = TO_DATETIME(@timestamp), host_ip = TO_IP(client_ip) @@ -1081,6 +1149,7 @@ multiIndexIpStringTsLongRenameToString required_capability: union_types required_capability: metadata_fields required_capability: union_types_remove_fields +required_capability: to_date_nanos FROM sample_data* METADATA _index | EVAL ts = TO_STRING(TO_DATETIME(@timestamp)), host_ip = TO_STRING(TO_IP(client_ip)) @@ -1123,6 +1192,7 @@ multiIndexWhereIpStringTsLong required_capability: union_types required_capability: metadata_fields required_capability: union_types_remove_fields +required_capability: to_date_nanos FROM sample_data* METADATA _index | WHERE TO_LONG(@timestamp) < 1698068014937 AND TO_STRING(client_ip) == "172.21.2.162" @@ -1139,6 +1209,7 @@ sample_data_ts_long | 3450233 | Connected to 10.1.0.3 multiIndexWhereIpStringTsLongStats required_capability: union_types required_capability: union_types_remove_fields +required_capability: to_date_nanos FROM sample_data* | WHERE TO_LONG(@timestamp) < 1698068014937 AND TO_STRING(client_ip) == "172.21.2.162" @@ -1155,6 +1226,7 @@ multiIndexWhereIpStringLikeTsLong required_capability: union_types required_capability: metadata_fields required_capability: union_types_remove_fields +required_capability: to_date_nanos FROM sample_data* METADATA _index | WHERE TO_LONG(@timestamp) < 1698068014937 AND TO_STRING(client_ip) LIKE "172.21.2.16?" @@ -1171,6 +1243,7 @@ sample_data_ts_long | 3450233 | Connected to 10.1.0.3 multiIndexWhereIpStringLikeTsLongStats required_capability: union_types required_capability: union_types_remove_fields +required_capability: to_date_nanos FROM sample_data* | WHERE TO_LONG(@timestamp) < 1698068014937 AND TO_STRING(client_ip) LIKE "172.21.2.16?" 
@@ -1187,6 +1260,7 @@ multiIndexMultiColumnTypesRename required_capability: union_types required_capability: metadata_fields required_capability: union_types_remove_fields +required_capability: to_date_nanos FROM sample_data* METADATA _index | WHERE event_duration > 8000000 @@ -1205,6 +1279,7 @@ multiIndexMultiColumnTypesRenameAndKeep required_capability: union_types required_capability: metadata_fields required_capability: union_types_remove_fields +required_capability: to_date_nanos FROM sample_data* METADATA _index | WHERE event_duration > 8000000 @@ -1224,6 +1299,7 @@ multiIndexMultiColumnTypesRenameAndDrop required_capability: union_types required_capability: metadata_fields required_capability: union_types_remove_fields +required_capability: to_date_nanos FROM sample_data* METADATA _index | WHERE event_duration > 8000000 @@ -1515,7 +1591,7 @@ FROM sample_data, sample_data_ts_long null | 172.21.0.5 | 1232382 | Disconnected | 8268153 ; -multiIndexIndirectUseOfUnionTypesInLookup +multiIndexIndirectUseOfUnionTypesInLookup-Ignore // TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: // make the csv tests work with multiple indices. required_capability: union_types @@ -1524,7 +1600,7 @@ FROM sample_data, sample_data_ts_long | SORT client_ip ASC | LIMIT 1 | EVAL int = (event_duration - 1232380)::integer -| LOOKUP int_number_names ON int +| LOOKUP_🐔 int_number_names ON int ; @timestamp:null | client_ip:ip | event_duration:long | message:keyword | int:integer | name:keyword diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec index 03d0b71894d9b..fbddb3d0e6989 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec @@ -46,7 +46,6 @@ from ul_logs | sort bytes_in desc nulls last, id | limit 12; ; filterPushDownGT -required_capability: mv_warn from ul_logs | where bytes_in >= to_ul(74330435873664882) | sort bytes_in | eval div = bytes_in / to_ul(pow(10., 15)) | keep bytes_in, div, id | limit 12; warningRegex:evaluation of \[bytes_in >= to_ul\(74330435873664882\)\] failed, treating result as null. Only first 20 failures recorded. @@ -68,7 +67,6 @@ warningRegex:java.lang.IllegalArgumentException: single-value function encounter ; filterPushDownRange -required_capability: mv_warn from ul_logs | where bytes_in >= to_ul(74330435873664882) | where bytes_in <= to_ul(316080452389500167) | sort bytes_in | eval div = bytes_in / to_ul(pow(10., 15)) | keep bytes_in, div, id | limit 12; warningRegex:evaluation of \[bytes_in .* to_ul\(.*\)\] failed, treating result as null. Only first 20 failures recorded. @@ -82,7 +80,6 @@ warningRegex:java.lang.IllegalArgumentException: single-value function encounter ; filterPushDownIn -required_capability: mv_warn // TODO: testing framework doesn't perform implicit conversion to UL of given values, needs explicit conversion from ul_logs | where bytes_in in (to_ul(74330435873664882), to_ul(154551962150890564), to_ul(195161570976258241)) | sort bytes_in | keep bytes_in, id; @@ -96,7 +93,6 @@ warningRegex:java.lang.IllegalArgumentException: single-value function encounter ; filterOnFieldsEquality -required_capability: mv_warn from ul_logs | where bytes_in == bytes_out; warningRegex:evaluation of \[bytes_in == bytes_out\] failed, treating result as null. Only first 20 failures recorded. 
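[Editor's note] The hunks above and below all make the same mechanical edit: the
`required_capability: mv_warn` gate is removed while the queries, warning assertions, and expected
results stay untouched. For orientation, a csv-spec test has roughly the shape sketched below (a made-up
example, not part of this change; the name, capability, and values are illustrative):

    exampleTestName
    required_capability: some_capability
    from ul_logs | where bytes_in == bytes_out | keep id;
    warningRegex:evaluation of \[bytes_in == bytes_out\] failed.*

    id:long
    42
    ;

Dropping the capability line only widens the set of versions the test runs against; it does not change
what the test asserts.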
@@ -107,7 +103,6 @@ warningRegex:java.lang.IllegalArgumentException: single-value function encounter ; filterOnFieldsInequality -required_capability: mv_warn from ul_logs | sort id | where bytes_in < bytes_out | eval b_in = bytes_in / to_ul(pow(10.,15)), b_out = bytes_out / to_ul(pow(10.,15)) | limit 5; warningRegex:evaluation of \[bytes_in < bytes_out\] failed, treating result as null. Only first 20 failures recorded. @@ -138,7 +133,6 @@ from ul_logs | stats c = count(bytes_in) by bytes_in | sort c desc, bytes_in des ; case -required_capability: mv_warn from ul_logs | where case(bytes_in == to_ul(154551962150890564), true, false); warningRegex:evaluation of \[bytes_in == to_ul\(154551962150890564\)\] failed, treating result as null. Only first 20 failures recorded. @@ -149,7 +143,6 @@ warningRegex:java.lang.IllegalArgumentException: single-value function encounter ; toDegrees -required_capability: mv_warn FROM ul_logs | WHERE bytes_in == bytes_out | EVAL deg = TO_DEGREES(bytes_in) | KEEP bytes_in, deg ; @@ -161,7 +154,6 @@ warningRegex:java.lang.IllegalArgumentException: single-value function encounter ; toRadians -required_capability: mv_warn FROM ul_logs | WHERE bytes_in == bytes_out | EVAL rad = TO_RADIANS(bytes_in) | KEEP bytes_in, rad ; diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java new file mode 100644 index 0000000000000..440582dcfbb45 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClusterAsyncQueryIT.java @@ -0,0 +1,522 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.esql.action;
+
+import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.compute.operator.exchange.ExchangeService;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.core.Tuple;
+import org.elasticsearch.index.mapper.OnScriptError;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.plugins.ScriptPlugin;
+import org.elasticsearch.script.LongFieldScript;
+import org.elasticsearch.script.ScriptContext;
+import org.elasticsearch.script.ScriptEngine;
+import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.test.AbstractMultiClustersTestCase;
+import org.elasticsearch.test.XContentTestUtils;
+import org.elasticsearch.transport.RemoteClusterAware;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.json.JsonXContent;
+import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest;
+import org.elasticsearch.xpack.core.async.GetAsyncResultRequest;
+import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction;
+import org.elasticsearch.xpack.esql.plugin.EsqlPlugin;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.core.TimeValue.timeValueMillis;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.Matchers.not;
+
+public class CrossClusterAsyncQueryIT extends AbstractMultiClustersTestCase {
+
+    private static final String REMOTE_CLUSTER_1 = "cluster-a";
+    private static final String REMOTE_CLUSTER_2 = "remote-b";
+    private static String LOCAL_INDEX = "logs-1";
+    private static String REMOTE_INDEX = "logs-2";
+    private static final String INDEX_WITH_RUNTIME_MAPPING = "blocking";
+
+    @Override
+    protected Collection<String> remoteClusterAlias() {
+        return List.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2);
+    }
+
+    @Override
+    protected Map<String, Boolean> skipUnavailableForRemoteClusters() {
+        return Map.of(REMOTE_CLUSTER_1, randomBoolean(), REMOTE_CLUSTER_2, randomBoolean());
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins(String clusterAlias) {
+        List<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins(clusterAlias));
+        plugins.add(EsqlPlugin.class);
+        plugins.add(EsqlAsyncActionIT.LocalStateEsqlAsync.class); // allows the async_search DELETE action
+        plugins.add(InternalExchangePlugin.class);
+        plugins.add(PauseFieldPlugin.class);
+        return plugins;
+    }
+
+    public static class InternalExchangePlugin extends Plugin {
+        @Override
+        public List<Setting<?>> getSettings() {
+            return List.of(
+                Setting.timeSetting(
+                    ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING,
+                    TimeValue.timeValueSeconds(30),
+                    Setting.Property.NodeScope
+                )
+            );
+        }
+    }
+
+    @Before
+    public void resetPlugin() {
+        PauseFieldPlugin.allowEmitting = new CountDownLatch(1);
+        PauseFieldPlugin.startEmitting = new CountDownLatch(1);
+    }
+
+    public static class PauseFieldPlugin extends Plugin implements ScriptPlugin {
+        public static CountDownLatch startEmitting = new CountDownLatch(1);
+        public static CountDownLatch allowEmitting = new CountDownLatch(1);
+
+        @Override
+        public ScriptEngine getScriptEngine(Settings settings, Collection<ScriptContext<?>> contexts) {
+            return new ScriptEngine() {
+                @Override
+                public String getType() {
+                    return "pause";
+                }
+
+                @Override
+                @SuppressWarnings("unchecked")
+                public <FactoryType> FactoryType compile(
+                    String name,
+                    String code,
+                    ScriptContext<FactoryType> context,
+                    Map<String, String> params
+                ) {
+                    if (context == LongFieldScript.CONTEXT) {
+                        return (FactoryType) new LongFieldScript.Factory() {
+                            @Override
+                            public LongFieldScript.LeafFactory newFactory(
+                                String fieldName,
+                                Map<String, Object> params,
+                                SearchLookup searchLookup,
+                                OnScriptError onScriptError
+                            ) {
+                                return ctx -> new LongFieldScript(fieldName, params, searchLookup, onScriptError, ctx) {
+                                    @Override
+                                    public void execute() {
+                                        startEmitting.countDown();
+                                        try {
+                                            assertTrue(allowEmitting.await(30, TimeUnit.SECONDS));
+                                        } catch (InterruptedException e) {
+                                            throw new AssertionError(e);
+                                        }
+                                        emit(1);
+                                    }
+                                };
+                            }
+                        };
+                    }
+                    throw new IllegalStateException("unsupported type " + context);
+                }
+
+                @Override
+                public Set<ScriptContext<?>> getSupportedContexts() {
+                    return Set.of(LongFieldScript.CONTEXT);
+                }
+            };
+        }
+    }
+
+    /**
+     * Includes testing for CCS metadata in the GET /_query/async/:id response while the search is still running
+     */
+    public void testSuccessfulPathways() throws Exception {
+        Map<String, Object> testClusterInfo = setupClusters(3);
+        int localNumShards = (Integer) testClusterInfo.get("local.num_shards");
+        int remote1NumShards = (Integer) testClusterInfo.get("remote1.num_shards");
+        int remote2NumShards = (Integer) testClusterInfo.get("remote2.blocking_index.num_shards");
+
+        Tuple<Boolean, Boolean> includeCCSMetadata = randomIncludeCCSMetadata();
+        Boolean requestIncludeMeta = includeCCSMetadata.v1();
+        boolean responseExpectMeta = includeCCSMetadata.v2();
+
+        AtomicReference<String> asyncExecutionId = new AtomicReference<>();
+
+        String q = "FROM logs-*,cluster-a:logs-*,remote-b:blocking | STATS total=sum(const) | LIMIT 10";
+        try (EsqlQueryResponse resp = runAsyncQuery(q, requestIncludeMeta, null, TimeValue.timeValueMillis(100))) {
+            assertTrue(resp.isRunning());
+            assertNotNull("async execution id is null", resp.asyncExecutionId());
+            asyncExecutionId.set(resp.asyncExecutionId().get());
+            // executionInfo may or may not be set on the initial response when there is a relatively low wait_for_completion_timeout
+            // so we do not check for it here
+        }
+
+        // wait until we know that the query against 'remote-b:blocking' has started
+        PauseFieldPlugin.startEmitting.await(30, TimeUnit.SECONDS);
+
+        // wait until the query of 'cluster-a:logs-*' has finished (it is not blocked since we are not searching the 'blocking' index on it)
+        assertBusy(() -> {
+            try (EsqlQueryResponse asyncResponse = getAsyncResponse(asyncExecutionId.get())) {
+                EsqlExecutionInfo executionInfo = asyncResponse.getExecutionInfo();
+                assertNotNull(executionInfo);
+                EsqlExecutionInfo.Cluster clusterA = executionInfo.getCluster("cluster-a");
+                assertThat(clusterA.getStatus(), not(equalTo(EsqlExecutionInfo.Cluster.Status.RUNNING)));
+            }
+        });
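+
+        // editor's note: the 'pause' runtime field script above counts down startEmitting just before its
+        // first emit and then blocks on allowEmitting, which is what lets this test observe the
+        // intermediate RUNNING states deterministically before the latch is released further down.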
+
+        /* at this point:
+         * the query against cluster-a should be finished
+         * the query against remote-b should be running (blocked on the PauseFieldPlugin.allowEmitting CountDown)
+         * the query against the local cluster should be running because it has a STATS clause that needs to wait on remote-b
+         */
+        try (EsqlQueryResponse asyncResponse = getAsyncResponse(asyncExecutionId.get())) {
+            EsqlExecutionInfo executionInfo = asyncResponse.getExecutionInfo();
+            assertThat(asyncResponse.isRunning(), is(true));
+            assertThat(
+                executionInfo.clusterAliases(),
+                equalTo(Set.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2, RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY))
+            );
+            assertThat(executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.RUNNING), equalTo(2));
+            assertThat(executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL), equalTo(1));
+
+            EsqlExecutionInfo.Cluster clusterA = executionInfo.getCluster(REMOTE_CLUSTER_1);
+            assertThat(clusterA.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(clusterA.getTotalShards(), greaterThanOrEqualTo(1));
+            assertThat(clusterA.getSuccessfulShards(), equalTo(clusterA.getTotalShards()));
+            assertThat(clusterA.getSkippedShards(), equalTo(0));
+            assertThat(clusterA.getFailedShards(), equalTo(0));
+            assertThat(clusterA.getFailures().size(), equalTo(0));
+            assertThat(clusterA.getTook().millis(), greaterThanOrEqualTo(0L));
+
+            EsqlExecutionInfo.Cluster local = executionInfo.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
+            // should still be RUNNING since the local cluster has to do a STATS on the coordinator, waiting on remoteB
+            assertThat(local.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.RUNNING));
+            assertThat(clusterA.getTotalShards(), greaterThanOrEqualTo(1));
+
+            EsqlExecutionInfo.Cluster remoteB = executionInfo.getCluster(REMOTE_CLUSTER_2);
+            // should still be RUNNING since we haven't released the countdown lock to proceed
+            assertThat(remoteB.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.RUNNING));
+            assertNull(remoteB.getSuccessfulShards()); // should not be filled in until query is finished
+
+            assertClusterMetadataInResponse(asyncResponse, responseExpectMeta, 3);
+        }
+
+        // allow remoteB query to proceed
+        PauseFieldPlugin.allowEmitting.countDown();
+
+        // wait until both remoteB and local queries have finished
+        assertBusy(() -> {
+            try (EsqlQueryResponse asyncResponse = getAsyncResponse(asyncExecutionId.get())) {
+                EsqlExecutionInfo executionInfo = asyncResponse.getExecutionInfo();
+                assertNotNull(executionInfo);
+                EsqlExecutionInfo.Cluster remoteB = executionInfo.getCluster(REMOTE_CLUSTER_2);
+                assertThat(remoteB.getStatus(), not(equalTo(EsqlExecutionInfo.Cluster.Status.RUNNING)));
+                EsqlExecutionInfo.Cluster local = executionInfo.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
+                assertThat(local.getStatus(), not(equalTo(EsqlExecutionInfo.Cluster.Status.RUNNING)));
+                assertThat(asyncResponse.isRunning(), is(false));
+            }
+        });
+
+        try (EsqlQueryResponse asyncResponse = getAsyncResponse(asyncExecutionId.get())) {
+            EsqlExecutionInfo executionInfo = asyncResponse.getExecutionInfo();
+            assertNotNull(executionInfo);
+            assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(1L));
+
+            EsqlExecutionInfo.Cluster clusterA = executionInfo.getCluster(REMOTE_CLUSTER_1);
+            assertThat(clusterA.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(clusterA.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(clusterA.getTotalShards(), equalTo(remote1NumShards));
+            assertThat(clusterA.getSuccessfulShards(), equalTo(remote1NumShards));
+            assertThat(clusterA.getSkippedShards(), equalTo(0));
+            assertThat(clusterA.getFailedShards(), equalTo(0));
+            assertThat(clusterA.getFailures().size(), equalTo(0));
+
+            EsqlExecutionInfo.Cluster remoteB = executionInfo.getCluster(REMOTE_CLUSTER_2);
+            assertThat(remoteB.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(remoteB.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(remoteB.getTotalShards(), equalTo(remote2NumShards));
+            assertThat(remoteB.getSuccessfulShards(), equalTo(remote2NumShards));
+            assertThat(remoteB.getSkippedShards(), equalTo(0));
+            assertThat(remoteB.getFailedShards(), equalTo(0));
+            assertThat(remoteB.getFailures().size(), equalTo(0));
+
+            EsqlExecutionInfo.Cluster local = executionInfo.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
+            assertThat(local.getTook().millis(), greaterThanOrEqualTo(0L));
+            assertThat(local.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+            assertThat(local.getTotalShards(), equalTo(localNumShards));
+            assertThat(local.getSuccessfulShards(), equalTo(localNumShards));
+            assertThat(local.getSkippedShards(), equalTo(0));
+            assertThat(local.getFailedShards(), equalTo(0));
+            assertThat(local.getFailures().size(), equalTo(0));
+        } finally {
+            AcknowledgedResponse acknowledgedResponse = deleteAsyncId(asyncExecutionId.get());
+            assertThat(acknowledgedResponse.isAcknowledged(), is(true));
+        }
+    }
+
+    public void testAsyncQueriesWithLimit0() throws IOException {
+        setupClusters(3);
+        Tuple<Boolean, Boolean> includeCCSMetadata = randomIncludeCCSMetadata();
+        Boolean requestIncludeMeta = includeCCSMetadata.v1();
+        boolean responseExpectMeta = includeCCSMetadata.v2();
+
+        final TimeValue waitForCompletion = TimeValue.timeValueNanos(randomFrom(1L, Long.MAX_VALUE));
+        String asyncExecutionId = null;
+        try (EsqlQueryResponse resp = runAsyncQuery("FROM logs*,*:logs* | LIMIT 0", requestIncludeMeta, null, waitForCompletion)) {
+            EsqlExecutionInfo executionInfo = resp.getExecutionInfo();
+            if (resp.isRunning()) {
+                asyncExecutionId = resp.asyncExecutionId().get();
+                assertThat(resp.columns().size(), equalTo(0));
+                assertThat(resp.values().hasNext(), is(false)); // values should be empty list
+
+            } else {
+                assertThat(resp.columns().size(), equalTo(4));
+                assertThat(resp.columns().contains(new ColumnInfoImpl("const", "long")), is(true));
+                assertThat(resp.columns().contains(new ColumnInfoImpl("id", "keyword")), is(true));
+                assertThat(resp.columns().contains(new ColumnInfoImpl("tag", "keyword")), is(true));
+                assertThat(resp.columns().contains(new ColumnInfoImpl("v", "long")), is(true));
+                assertThat(resp.values().hasNext(), is(false)); // values should be empty list
+
+                assertNotNull(executionInfo);
+                assertThat(executionInfo.isCrossClusterSearch(), is(true));
+                long overallTookMillis = executionInfo.overallTook().millis();
+                assertThat(overallTookMillis, greaterThanOrEqualTo(0L));
+                assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta));
+                assertThat(executionInfo.clusterAliases(), equalTo(Set.of(LOCAL_CLUSTER, REMOTE_CLUSTER_1, REMOTE_CLUSTER_2)));
+
+                EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1);
+                assertThat(remoteCluster.getIndexExpression(), equalTo("logs*"));
+                assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+                assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+                assertThat(remoteCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis));
+                assertThat(remoteCluster.getTotalShards(), equalTo(0));
+                assertThat(remoteCluster.getSuccessfulShards(), equalTo(0));
+                assertThat(remoteCluster.getSkippedShards(), equalTo(0));
+                assertThat(remoteCluster.getFailedShards(), equalTo(0));
+
+                EsqlExecutionInfo.Cluster remote2Cluster = executionInfo.getCluster(REMOTE_CLUSTER_2);
+                assertThat(remote2Cluster.getIndexExpression(), equalTo("logs*"));
+                assertThat(remote2Cluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+                assertThat(remote2Cluster.getTook().millis(), greaterThanOrEqualTo(0L));
+                assertThat(remote2Cluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis));
+                assertThat(remote2Cluster.getTotalShards(), equalTo(0));
+                assertThat(remote2Cluster.getSuccessfulShards(), equalTo(0));
+                assertThat(remote2Cluster.getSkippedShards(), equalTo(0));
+                assertThat(remote2Cluster.getFailedShards(), equalTo(0));
+
+                EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER);
+                assertThat(localCluster.getIndexExpression(), equalTo("logs*"));
+                assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL));
+                assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L));
+                assertThat(localCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis));
+                assertThat(localCluster.getTotalShards(), equalTo(0));
+                assertThat(localCluster.getSuccessfulShards(), equalTo(0));
+                assertThat(localCluster.getSkippedShards(), equalTo(0));
+                assertThat(localCluster.getFailedShards(), equalTo(0));
+
+                assertClusterMetadataInResponse(resp, responseExpectMeta, 3);
+            }
+        } finally {
+            if (asyncExecutionId != null) {
+                AcknowledgedResponse acknowledgedResponse = deleteAsyncId(asyncExecutionId);
+                assertThat(acknowledgedResponse.isAcknowledged(), is(true));
+            }
+        }
+    }
+
+    protected EsqlQueryResponse runAsyncQuery(String query, Boolean ccsMetadata, QueryBuilder filter, TimeValue waitCompletionTime) {
+        EsqlQueryRequest request = EsqlQueryRequest.asyncEsqlQueryRequest();
+        request.query(query);
+        request.pragmas(AbstractEsqlIntegTestCase.randomPragmas());
+        request.profile(randomInt(5) == 2);
+        request.columnar(randomBoolean());
+        if (ccsMetadata != null) {
+            request.includeCCSMetadata(ccsMetadata);
+        }
+        request.waitForCompletionTimeout(waitCompletionTime);
+        request.keepOnCompletion(false);
+        if (filter != null) {
+            request.filter(filter);
+        }
+        return runAsyncQuery(request);
+    }
+
+    protected EsqlQueryResponse runAsyncQuery(EsqlQueryRequest request) {
+        try {
+            return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS);
+        } catch (ElasticsearchTimeoutException e) {
+            throw new AssertionError("timeout waiting for query response", e);
+        }
+    }
+
+    AcknowledgedResponse deleteAsyncId(String id) {
+        try {
+            DeleteAsyncResultRequest request = new DeleteAsyncResultRequest(id);
+            return client().execute(TransportDeleteAsyncResultAction.TYPE, request).actionGet(30, TimeUnit.SECONDS);
+        } catch (ElasticsearchTimeoutException e) {
+            throw new AssertionError("timeout waiting for DELETE response", e);
+        }
+    }
+
+    EsqlQueryResponse getAsyncResponse(String id) {
+        try {
+            var getResultsRequest = new GetAsyncResultRequest(id).setWaitForCompletionTimeout(timeValueMillis(1));
+            return client().execute(EsqlAsyncGetResultAction.INSTANCE, getResultsRequest).actionGet(30, TimeUnit.SECONDS);
+        } catch (ElasticsearchTimeoutException e) {
+            throw new AssertionError("timeout waiting for GET async result", e);
+        }
+    }
+
+    private static void assertClusterMetadataInResponse(EsqlQueryResponse resp, boolean responseExpectMeta, int numClusters) {
+        try {
+            final Map<String, Object> esqlResponseAsMap = XContentTestUtils.convertToMap(resp);
+            final Object clusters = esqlResponseAsMap.get("_clusters");
+            if (responseExpectMeta) {
+                assertNotNull(clusters);
+                // test a few entries to ensure it looks correct (other tests do a full analysis of the metadata in the response)
+                @SuppressWarnings("unchecked")
+                Map<String, Object> inner = (Map<String, Object>) clusters;
+                assertTrue(inner.containsKey("total"));
+                assertThat((int) inner.get("total"), equalTo(numClusters));
+                assertTrue(inner.containsKey("details"));
+            } else {
+                assertNull(clusters);
+            }
+        } catch (IOException e) {
+            fail("Could not convert EsqlQueryResponse to Map: " + e);
+        }
+    }
+
+    /**
+     * @return a random pairing for the include_ccs_metadata flag, where
+     *         v1 is the value to send to runQuery (can be null; null means use the default value) and
+     *         v2 is whether to expect CCS metadata in the response (never null)
+     */
+    public static Tuple<Boolean, Boolean> randomIncludeCCSMetadata() {
+        return switch (randomIntBetween(1, 3)) {
+            case 1 -> new Tuple<>(Boolean.TRUE, Boolean.TRUE);
+            case 2 -> new Tuple<>(Boolean.FALSE, Boolean.FALSE);
+            case 3 -> new Tuple<>(null, Boolean.FALSE);
+            default -> throw new AssertionError("should not get here");
+        };
+    }
+
+    Map<String, Object> setupClusters(int numClusters) throws IOException {
+        assert numClusters == 2 || numClusters == 3 : "only 2 or 3 clusters are supported, got: " + numClusters;
+        int numShardsLocal = randomIntBetween(1, 5);
+        populateLocalIndices(LOCAL_INDEX, numShardsLocal);
+
+        int numShardsRemote = randomIntBetween(1, 5);
+        populateRemoteIndices(REMOTE_CLUSTER_1, REMOTE_INDEX, numShardsRemote);
+
+        Map<String, Object> clusterInfo = new HashMap<>();
+        clusterInfo.put("local.num_shards", numShardsLocal);
+        clusterInfo.put("local.index", LOCAL_INDEX);
+        clusterInfo.put("remote1.num_shards", numShardsRemote);
+        clusterInfo.put("remote1.index", REMOTE_INDEX);
+
+        if (numClusters == 3) {
+            int numShardsRemote2 = randomIntBetween(1, 5);
+            populateRemoteIndices(REMOTE_CLUSTER_2, REMOTE_INDEX, numShardsRemote2);
+            populateRemoteIndicesWithRuntimeMapping(REMOTE_CLUSTER_2);
+            clusterInfo.put("remote2.index", REMOTE_INDEX);
+            clusterInfo.put("remote2.num_shards", numShardsRemote2);
+            clusterInfo.put("remote2.blocking_index", INDEX_WITH_RUNTIME_MAPPING);
+            clusterInfo.put("remote2.blocking_index.num_shards", 1);
+        }
+
+        String skipUnavailableKey = Strings.format("cluster.remote.%s.skip_unavailable", REMOTE_CLUSTER_1);
+        Setting<?> skipUnavailableSetting = cluster(REMOTE_CLUSTER_1).clusterService().getClusterSettings().get(skipUnavailableKey);
+        boolean skipUnavailable = (boolean) cluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY).clusterService()
+            .getClusterSettings()
+            .get(skipUnavailableSetting);
+        clusterInfo.put("remote.skip_unavailable", skipUnavailable);
+
+        return clusterInfo;
+    }
+
+    void populateLocalIndices(String indexName, int numShards) {
+        Client localClient = client(LOCAL_CLUSTER);
+        assertAcked(
+            localClient.admin()
+                .indices()
+                .prepareCreate(indexName)
+                .setSettings(Settings.builder().put("index.number_of_shards", numShards))
+                .setMapping("id", "type=keyword", "tag", "type=keyword", "v", "type=long", "const", "type=long")
+        );
+        for (int i = 0; i < 10; i++) {
+            localClient.prepareIndex(indexName).setSource("id", "local-" + i, "tag", "local", "v", i).get();
+        }
+        localClient.admin().indices().prepareRefresh(indexName).get();
+    }
+
+    void populateRemoteIndicesWithRuntimeMapping(String
clusterAlias) throws IOException { + XContentBuilder mapping = JsonXContent.contentBuilder().startObject(); + mapping.startObject("runtime"); + { + mapping.startObject("const"); + { + mapping.field("type", "long"); + mapping.startObject("script").field("source", "").field("lang", "pause").endObject(); + } + mapping.endObject(); + } + mapping.endObject(); + mapping.endObject(); + client(clusterAlias).admin().indices().prepareCreate(INDEX_WITH_RUNTIME_MAPPING).setMapping(mapping).get(); + BulkRequestBuilder bulk = client(clusterAlias).prepareBulk(INDEX_WITH_RUNTIME_MAPPING) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int i = 0; i < 10; i++) { + bulk.add(new IndexRequest().source("foo", i)); + } + bulk.get(); + } + + void populateRemoteIndices(String clusterAlias, String indexName, int numShards) throws IOException { + Client remoteClient = client(clusterAlias); + assertAcked( + remoteClient.admin() + .indices() + .prepareCreate(indexName) + .setSettings(Settings.builder().put("index.number_of_shards", numShards)) + .setMapping("id", "type=keyword", "tag", "type=keyword", "v", "type=long") + ); + for (int i = 0; i < 10; i++) { + remoteClient.prepareIndex(indexName).setSource("id", "remote-" + i, "tag", "remote", "v", i * i).get(); + } + remoteClient.admin().indices().prepareRefresh(indexName).get(); + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java index df6a1e00b0212..5ffc92636b272 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.action; +import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction; import org.elasticsearch.action.bulk.BulkRequestBuilder; @@ -15,6 +16,7 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.compute.operator.DriverTaskRunner; import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.core.TimeValue; @@ -27,8 +29,10 @@ import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.esql.plugin.ComputeService; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.junit.Before; @@ -40,8 +44,10 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; import static org.elasticsearch.xpack.esql.action.AbstractEsqlIntegTestCase.randomPragmas; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static 
org.hamcrest.Matchers.hasSize; @@ -173,20 +179,63 @@ public void testCancel() throws Exception { }); var cancelRequest = new CancelTasksRequest().setTargetTaskId(rootTasks.get(0).taskId()).setReason("proxy timeout"); client().execute(TransportCancelTasksAction.TYPE, cancelRequest); - assertBusy(() -> { - List drivers = client(REMOTE_CLUSTER).admin() - .cluster() - .prepareListTasks() - .setActions(DriverTaskRunner.ACTION_NAME) - .get() - .getTasks(); - assertThat(drivers.size(), greaterThanOrEqualTo(1)); - for (TaskInfo driver : drivers) { - assertTrue(driver.cancellable()); - } - }); - PauseFieldPlugin.allowEmitting.countDown(); + try { + assertBusy(() -> { + List drivers = client(REMOTE_CLUSTER).admin() + .cluster() + .prepareListTasks() + .setActions(DriverTaskRunner.ACTION_NAME) + .get() + .getTasks(); + assertThat(drivers.size(), greaterThanOrEqualTo(1)); + for (TaskInfo driver : drivers) { + assertTrue(driver.cancelled()); + } + }); + } finally { + PauseFieldPlugin.allowEmitting.countDown(); + } Exception error = expectThrows(Exception.class, requestFuture::actionGet); assertThat(error.getMessage(), containsString("proxy timeout")); } + + public void testSameRemoteClusters() throws Exception { + TransportAddress address = cluster(REMOTE_CLUSTER).getInstance(TransportService.class).getLocalNode().getAddress(); + int moreClusters = between(1, 5); + for (int i = 0; i < moreClusters; i++) { + String clusterAlias = REMOTE_CLUSTER + "-" + i; + configureRemoteClusterWithSeedAddresses(clusterAlias, List.of(address)); + } + int numDocs = between(10, 100); + createRemoteIndex(numDocs); + EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(); + request.query("FROM *:test | STATS total=sum(const) | LIMIT 1"); + request.pragmas(randomPragmas()); + ActionFuture future = client().execute(EsqlQueryAction.INSTANCE, request); + try { + try { + assertBusy(() -> { + List tasks = client(REMOTE_CLUSTER).admin() + .cluster() + .prepareListTasks() + .setActions(ComputeService.CLUSTER_ACTION_NAME) + .get() + .getTasks(); + assertThat(tasks, hasSize(moreClusters + 1)); + }); + } finally { + PauseFieldPlugin.allowEmitting.countDown(); + } + try (EsqlQueryResponse resp = future.actionGet(30, TimeUnit.SECONDS)) { + // TODO: This produces incorrect results because data on the remote cluster is processed multiple times. 
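+                // editor's note, making the arithmetic explicit: all aliases point at the same physical
+                // cluster, `FROM *:test` fans out to each of the (moreClusters + 1) aliases, and every pass
+                // sums const = 1 over numDocs documents, which is how the line below arrives at its value.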
+ long expectedCount = numDocs * (moreClusters + 1L); + assertThat(getValuesList(resp), equalTo(List.of(List.of(expectedCount)))); + } + } finally { + for (int i = 0; i < moreClusters; i++) { + String clusterAlias = REMOTE_CLUSTER + "-" + i; + removeRemoteCluster(clusterAlias); + } + } + } } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java index ba44adb5a85e0..596c70e57ccd6 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java @@ -10,6 +10,7 @@ import org.elasticsearch.Build; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Priority; @@ -21,12 +22,16 @@ import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.AbstractMultiClustersTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; @@ -35,30 +40,40 @@ import java.util.Collection; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; public class CrossClustersQueryIT extends AbstractMultiClustersTestCase { - private static final String REMOTE_CLUSTER = "cluster-a"; + private static final String REMOTE_CLUSTER_1 = "cluster-a"; + private static final String REMOTE_CLUSTER_2 = "remote-b"; + private static String LOCAL_INDEX = "logs-1"; + private static String IDX_ALIAS = "alias1"; + private static String FILTERED_IDX_ALIAS = "alias-filtered-1"; + private static String REMOTE_INDEX = "logs-2"; @Override protected Collection remoteClusterAlias() { - return List.of(REMOTE_CLUSTER); + return List.of(REMOTE_CLUSTER_1, REMOTE_CLUSTER_2); } @Override protected Map skipUnavailableForRemoteClusters() { - return Map.of(REMOTE_CLUSTER, 
randomBoolean()); + return Map.of(REMOTE_CLUSTER_1, randomBoolean()); } @Override @@ -90,7 +105,7 @@ public void testSuccessfulPathways() { Tuple includeCCSMetadata = randomIncludeCCSMetadata(); Boolean requestIncludeMeta = includeCCSMetadata.v1(); boolean responseExpectMeta = includeCCSMetadata.v2(); - try (EsqlQueryResponse resp = runQuery("from logs-*,*:logs-* | stats sum (v)", requestIncludeMeta)) { + try (EsqlQueryResponse resp = runQuery("from logs-*,c*:logs-* | stats sum (v)", requestIncludeMeta)) { List> values = getValuesList(resp); assertThat(values, hasSize(1)); assertThat(values.get(0), equalTo(List.of(330L))); @@ -102,9 +117,9 @@ public void testSuccessfulPathways() { assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER_1, LOCAL_CLUSTER))); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); assertThat(remoteCluster.getIndexExpression(), equalTo("logs-*")); assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); @@ -128,7 +143,7 @@ public void testSuccessfulPathways() { assertClusterMetadataInResponse(resp, responseExpectMeta); } - try (EsqlQueryResponse resp = runQuery("from logs-*,*:logs-* | stats count(*) by tag | sort tag | keep tag", requestIncludeMeta)) { + try (EsqlQueryResponse resp = runQuery("from logs-*,c*:logs-* | stats count(*) by tag | sort tag | keep tag", requestIncludeMeta)) { List> values = getValuesList(resp); assertThat(values, hasSize(2)); assertThat(values.get(0), equalTo(List.of("local"))); @@ -141,9 +156,9 @@ public void testSuccessfulPathways() { assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER_1, LOCAL_CLUSTER))); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); assertThat(remoteCluster.getIndexExpression(), equalTo("logs-*")); assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); @@ -168,171 +183,695 @@ public void testSuccessfulPathways() { } } - public void testSearchesWhereMissingIndicesAreSpecified() { - Map testClusterInfo = setupTwoClusters(); + public void testSearchesAgainstNonMatchingIndicesWithLocalOnly() { + Map testClusterInfo = setupClusters(2); + String localIndex = (String) testClusterInfo.get("local.index"); + + { + String q = "FROM nomatch," + localIndex; + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> runQuery(q, false)); + assertThat(e.getDetailedMessage(), containsString("no such index [nomatch]")); + + // MP TODO: am I able to fix this from the field-caps call? Yes, if we detect concrete vs. 
wildcard expressions in user query + // TODO bug - this does not throw; uncomment this test once https://github.com/elastic/elasticsearch/issues/114495 is fixed + // String limit0 = q + " | LIMIT 0"; + // VerificationException ve = expectThrows(VerificationException.class, () -> runQuery(limit0, false)); + // assertThat(ve.getDetailedMessage(), containsString("No matching indices for [nomatch]")); + } + + { + // no failure since concrete index matches, so wildcard matching is lenient + String q = "FROM nomatch*," + localIndex; + try (EsqlQueryResponse resp = runQuery(q, false)) { + // we are only testing that this does not throw an Exception, so the asserts below are minimal + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(false)); + } + + String limit0 = q + " | LIMIT 0"; + try (EsqlQueryResponse resp = runQuery(limit0, false)) { + // we are only testing that this does not throw an Exception, so the asserts below are minimal + assertThat(resp.columns().size(), greaterThanOrEqualTo(1)); + assertThat(getValuesList(resp).size(), equalTo(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(false)); + } + } + { + String q = "FROM nomatch"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, false)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, false)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch]")); + } + { + String q = "FROM nomatch*"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, false)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch*]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, false)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch*]")); + } + } + + public void testSearchesAgainstIndicesWithNoMappingsSkipUnavailableTrue() { + int numClusters = 2; + setupClusters(numClusters); + Map clusterToEmptyIndexMap = createEmptyIndicesWithNoMappings(numClusters); + setSkipUnavailable(REMOTE_CLUSTER_1, randomBoolean()); + + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + + try { + String emptyIndex = clusterToEmptyIndexMap.get(REMOTE_CLUSTER_1); + String q = Strings.format("FROM cluster-a:%s", emptyIndex); + // query without referring to fields should work + { + String limit1 = q + " | LIMIT 1"; + try (EsqlQueryResponse resp = runQuery(limit1, requestIncludeMeta)) { + assertThat(resp.columns().size(), equalTo(1)); + assertThat(resp.columns().get(0).name(), equalTo("")); + assertThat(getValuesList(resp).size(), equalTo(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of(new ExpectedCluster(REMOTE_CLUSTER_1, emptyIndex, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0)) + ); + } + + String limit0 = q + " | LIMIT 0"; + try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) { + 
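+                    // editor's note: LIMIT 0 still resolves the schema (hence exactly one column asserted
+                    // below) but returns no rows, and the per-cluster shard accounting remains at zero.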
assertThat(resp.columns().size(), equalTo(1)); + assertThat(resp.columns().get(0).name(), equalTo("")); + assertThat(getValuesList(resp).size(), equalTo(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of(new ExpectedCluster(REMOTE_CLUSTER_1, emptyIndex, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0)) + ); + } + } + + // query that refers to missing fields should throw: + // "type": "verification_exception", + // "reason": "Found 1 problem\nline 2:7: Unknown column [foo]", + { + String keepQuery = q + " | KEEP foo | LIMIT 100"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(keepQuery, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown column [foo]")); + } + + } finally { + clearSkipUnavailable(); + } + } + + public void testSearchesAgainstNonMatchingIndicesWithSkipUnavailableTrue() { + int numClusters = 3; + Map testClusterInfo = setupClusters(numClusters); int localNumShards = (Integer) testClusterInfo.get("local.num_shards"); - int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); + int remote1NumShards = (Integer) testClusterInfo.get("remote.num_shards"); + int remote2NumShards = (Integer) testClusterInfo.get("remote2.num_shards"); + String localIndex = (String) testClusterInfo.get("local.index"); + String remote1Index = (String) testClusterInfo.get("remote.index"); + String remote2Index = (String) testClusterInfo.get("remote2.index"); + + createIndexAliases(numClusters); + setSkipUnavailable(REMOTE_CLUSTER_1, true); + setSkipUnavailable(REMOTE_CLUSTER_2, true); Tuple includeCCSMetadata = randomIncludeCCSMetadata(); Boolean requestIncludeMeta = includeCCSMetadata.v1(); boolean responseExpectMeta = includeCCSMetadata.v2(); - // since a valid local index was specified, the invalid index on cluster-a does not throw an exception, - // but instead is simply ignored - ensure this is captured in the EsqlExecutionInfo - try (EsqlQueryResponse resp = runQuery("from logs-*,cluster-a:no_such_index | stats sum (v)", requestIncludeMeta)) { - EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); - List> values = getValuesList(resp); - assertThat(values, hasSize(1)); - assertThat(values.get(0), equalTo(List.of(45L))); + try { + // missing concrete local index is fatal + { + String q = "FROM nomatch,cluster-a:" + randomFrom(remote1Index, IDX_ALIAS, FILTERED_IDX_ALIAS); + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch]")); + } - assertNotNull(executionInfo); - assertThat(executionInfo.isCrossClusterSearch(), is(true)); - long overallTookMillis = executionInfo.overallTook().millis(); - assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); - assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + // missing concrete remote index is not fatal when skip_unavailable=true (as long as an index matches on another cluster) + { + String localIndexName = randomFrom(localIndex, IDX_ALIAS, FILTERED_IDX_ALIAS); + String q = 
Strings.format("FROM %s,cluster-a:nomatch", localIndexName); + try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + new ExpectedCluster(LOCAL_CLUSTER, localIndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, localNumShards), + new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0) + ) + ); + } + + String limit0 = q + " | LIMIT 0"; + try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) { + assertThat(resp.columns().size(), greaterThan(0)); + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + new ExpectedCluster(LOCAL_CLUSTER, localIndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0) + ) + ); + } + } - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); + // since there is at least one matching index in the query, the missing wildcarded local index is not an error + { + String remoteIndexName = randomFrom(remote1Index, IDX_ALIAS, FILTERED_IDX_ALIAS); + String q = "FROM nomatch*,cluster-a:" + remoteIndexName; + try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + new ExpectedCluster( + REMOTE_CLUSTER_1, + remoteIndexName, + EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, + remote1NumShards + ) + ) + ); + } + + String limit0 = q + " | LIMIT 0"; + try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), equalTo(0)); + assertThat(resp.columns().size(), greaterThan(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + // LIMIT 0 searches always have total shards = 0 + new ExpectedCluster(REMOTE_CLUSTER_1, remoteIndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0) + ) + ); + } + } - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); - assertThat(remoteCluster.getIndexExpression(), 
equalTo("no_such_index")); - assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); - assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(remoteCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(remoteCluster.getTotalShards(), equalTo(0)); // 0 since no matching index, thus no shards to search - assertThat(remoteCluster.getSuccessfulShards(), equalTo(0)); - assertThat(remoteCluster.getSkippedShards(), equalTo(0)); - assertThat(remoteCluster.getFailedShards(), equalTo(0)); + // since at least one index of the query matches on some cluster, a wildcarded index on skip_un=true is not an error + { + String localIndexName = randomFrom(localIndex, IDX_ALIAS, FILTERED_IDX_ALIAS); + String q = Strings.format("FROM %s,cluster-a:nomatch*", localIndexName); + try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + new ExpectedCluster(LOCAL_CLUSTER, localIndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, localNumShards), + new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch*", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0) + ) + ); + } + + String limit0 = q + " | LIMIT 0"; + try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) { + assertThat(resp.columns().size(), greaterThan(0)); + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + new ExpectedCluster(LOCAL_CLUSTER, localIndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch*", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0) + ) + ); + } + } - EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER); - assertThat(localCluster.getIndexExpression(), equalTo("logs-*")); - assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); - assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(localCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(localCluster.getTotalShards(), equalTo(localNumShards)); - assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards)); - assertThat(localCluster.getSkippedShards(), equalTo(0)); - assertThat(localCluster.getFailedShards(), equalTo(0)); - } + // an error is thrown if there are no matching indices at all, even when the cluster is skip_unavailable=true + { + // with non-matching concrete index + String q = "FROM cluster-a:nomatch"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]")); - // since the remote cluster has a valid index expression, the missing local index is ignored - // make this is captured in the EsqlExecutionInfo - try ( - EsqlQueryResponse resp = runQuery( - "from no_such_index,*:logs-* | stats count(*) by tag | sort tag | keep tag", - 
requestIncludeMeta - ) - ) { - List> values = getValuesList(resp); - assertThat(values, hasSize(1)); - assertThat(values.get(0), equalTo(List.of("remote"))); + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]")); + } - EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); - assertNotNull(executionInfo); - assertThat(executionInfo.isCrossClusterSearch(), is(true)); - long overallTookMillis = executionInfo.overallTook().millis(); - assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); - assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + // an error is thrown if there are no matching indices at all, even when the cluster is skip_unavailable=true and the + // index was wildcarded + { + // with non-matching wildcard index + String q = "FROM cluster-a:nomatch*"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]")); + } - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); + // an error is thrown if there are no matching indices at all - local with wildcard, remote with concrete + { + String q = "FROM nomatch*,cluster-a:nomatch"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch*]")); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); - assertThat(remoteCluster.getIndexExpression(), equalTo("logs-*")); - assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); - assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(remoteCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(remoteCluster.getTotalShards(), equalTo(remoteNumShards)); - assertThat(remoteCluster.getSuccessfulShards(), equalTo(remoteNumShards)); - assertThat(remoteCluster.getSkippedShards(), equalTo(0)); - assertThat(remoteCluster.getFailedShards(), equalTo(0)); + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch*]")); + } - EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER); - assertThat(localCluster.getIndexExpression(), equalTo("no_such_index")); - // TODO: a follow on PR will change this to throw an Exception when the local cluster requests a concrete index that is missing - assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); - assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(localCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(localCluster.getTotalShards(), equalTo(0)); - assertThat(localCluster.getSuccessfulShards(), equalTo(0)); - assertThat(localCluster.getSkippedShards(), equalTo(0)); - assertThat(localCluster.getFailedShards(), equalTo(0)); - } + 
// an error is thrown if there are no matching indices at all - local with wildcard, remote with wildcard + { + String q = "FROM nomatch*,cluster-a:nomatch*"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch*]")); - // when multiple invalid indices are specified on the remote cluster, both should be ignored and present - // in the index expression of the EsqlExecutionInfo and with an indication that zero shards were searched - try ( - EsqlQueryResponse resp = runQuery( - "FROM no_such_index*,*:no_such_index1,*:no_such_index2,logs-1 | STATS COUNT(*) by tag | SORT tag | KEEP tag", - requestIncludeMeta - ) - ) { - List> values = getValuesList(resp); - assertThat(values, hasSize(1)); - assertThat(values.get(0), equalTo(List.of("local"))); + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch*]")); + } - EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); - assertNotNull(executionInfo); - assertThat(executionInfo.isCrossClusterSearch(), is(true)); - long overallTookMillis = executionInfo.overallTook().millis(); - assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); - assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + // an error is thrown if there are no matching indices at all - local with concrete, remote with concrete + { + String q = "FROM nomatch,cluster-a:nomatch"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch]")); - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch]")); + } - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); - assertThat(remoteCluster.getIndexExpression(), equalTo("no_such_index1,no_such_index2")); - assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); - assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(remoteCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(remoteCluster.getTotalShards(), equalTo(0)); - assertThat(remoteCluster.getSuccessfulShards(), equalTo(0)); - assertThat(remoteCluster.getSkippedShards(), equalTo(0)); - assertThat(remoteCluster.getFailedShards(), equalTo(0)); + // an error is thrown if there are no matching indices at all - local with concrete, remote with wildcard + { + String q = "FROM nomatch,cluster-a:nomatch*"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch]")); - EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER); - assertThat(localCluster.getIndexExpression(), equalTo("no_such_index*,logs-1")); - assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); - assertThat(localCluster.getTook().millis(), 
greaterThanOrEqualTo(0L)); - assertThat(localCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(localCluster.getTotalShards(), equalTo(localNumShards)); - assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards)); - assertThat(localCluster.getSkippedShards(), equalTo(0)); - assertThat(localCluster.getFailedShards(), equalTo(0)); + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch]")); + } + + // since cluster-a is skip_unavailable=true and at least one cluster has matching indices, no error is thrown + { + // TODO solve in follow-on PR which does skip_unavailable handling at execution time + // String q = Strings.format("FROM %s,cluster-a:nomatch,cluster-a:%s*", localIndex, remote1Index); + // try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) { + // assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + // EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + // assertThat(executionInfo.isCrossClusterSearch(), is(true)); + // assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + // assertExpectedClustersForMissingIndicesTests(executionInfo, List.of( + // // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + // new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0), + // new ExpectedCluster(REMOTE_CLUSTER_1, "*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, remote2NumShards) + // )); + // } + + // TODO: handle LIMIT 0 for this case in follow-on PR + // String limit0 = q + " | LIMIT 0"; + // try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) { + // assertThat(resp.columns().size(), greaterThanOrEqualTo(1)); + // assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(0)); + // EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + // assertThat(executionInfo.isCrossClusterSearch(), is(true)); + // assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + // assertExpectedClustersForMissingIndicesTests(executionInfo, List.of( + // // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + // new ExpectedCluster(LOCAL_CLUSTER, localIndex, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + // new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch," + remote1Index + "*", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0) + // )); + // } + } + + // tests with three clusters --- + + // since cluster-a is skip_unavailable=true and at least one cluster has matching indices, no error is thrown + // cluster-a should be marked as SKIPPED with VerificationException + { + String remote2IndexName = randomFrom(remote2Index, IDX_ALIAS, FILTERED_IDX_ALIAS); + String q = Strings.format("FROM nomatch*,cluster-a:nomatch,%s:%s", REMOTE_CLUSTER_2, remote2IndexName); + try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 
shards searched + new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0), + new ExpectedCluster( + REMOTE_CLUSTER_2, + remote2IndexName, + EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, + remote2NumShards + ) + ) + ); + } + + String limit0 = q + " | LIMIT 0"; + try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) { + assertThat(resp.columns().size(), greaterThanOrEqualTo(1)); + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0), + new ExpectedCluster(REMOTE_CLUSTER_2, remote2IndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0) + ) + ); + } + } + + // since cluster-a is skip_unavailable=true and at least one cluster has matching indices, no error is thrown + // cluster-a should be marked as SKIPPED with a "NoMatchingIndicesException" since a wildcard index was requested + { + String remote2IndexName = randomFrom(remote2Index, IDX_ALIAS, FILTERED_IDX_ALIAS); + String q = Strings.format("FROM nomatch*,cluster-a:nomatch*,%s:%s", REMOTE_CLUSTER_2, remote2IndexName); + try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch*", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0), + new ExpectedCluster( + REMOTE_CLUSTER_2, + remote2IndexName, + EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, + remote2NumShards + ) + ) + ); + } + + String limit0 = q + " | LIMIT 0"; + try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) { + assertThat(resp.columns().size(), greaterThanOrEqualTo(1)); + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + new ExpectedCluster(REMOTE_CLUSTER_1, "nomatch*", EsqlExecutionInfo.Cluster.Status.SKIPPED, 0), + new ExpectedCluster(REMOTE_CLUSTER_2, remote2IndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0) + ) + ); + } + } + } finally 
{ + clearSkipUnavailable(); } + } - // wildcard on remote cluster that matches nothing - should be present in EsqlExecutionInfo marked as SKIPPED, no shards searched - try (EsqlQueryResponse resp = runQuery("from cluster-a:no_such_index*,logs-* | stats sum (v)", requestIncludeMeta)) { - EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); - List> values = getValuesList(resp); - assertThat(values, hasSize(1)); - assertThat(values.get(0), equalTo(List.of(45L))); + public void testSearchesAgainstNonMatchingIndicesWithSkipUnavailableFalse() { + int numClusters = 3; + Map testClusterInfo = setupClusters(numClusters); + int remote1NumShards = (Integer) testClusterInfo.get("remote.num_shards"); + String localIndex = (String) testClusterInfo.get("local.index"); + String remote1Index = (String) testClusterInfo.get("remote.index"); + String remote2Index = (String) testClusterInfo.get("remote2.index"); - assertNotNull(executionInfo); - assertThat(executionInfo.isCrossClusterSearch(), is(true)); - long overallTookMillis = executionInfo.overallTook().millis(); - assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); - assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + createIndexAliases(numClusters); + setSkipUnavailable(REMOTE_CLUSTER_1, false); + setSkipUnavailable(REMOTE_CLUSTER_2, false); - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); - assertThat(remoteCluster.getIndexExpression(), equalTo("no_such_index*")); - assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); - assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(remoteCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(remoteCluster.getTotalShards(), equalTo(0)); - assertThat(remoteCluster.getSuccessfulShards(), equalTo(0)); - assertThat(remoteCluster.getSkippedShards(), equalTo(0)); - assertThat(remoteCluster.getFailedShards(), equalTo(0)); + try { + // missing concrete local index is an error + { + String q = "FROM nomatch,cluster-a:" + remote1Index; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [nomatch]")); + } - EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER); - assertThat(localCluster.getIndexExpression(), equalTo("logs-*")); - assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); - assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(localCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(localCluster.getTotalShards(), equalTo(localNumShards)); - assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards)); - assertThat(localCluster.getSkippedShards(), equalTo(0)); - assertThat(localCluster.getFailedShards(), equalTo(0)); + // missing concrete remote index is fatal when skip_unavailable=false + { + String q = "FROM 
logs*,cluster-a:nomatch"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]")); + } + + // No error since local non-matching has wildcard and the remote cluster matches + { + String remote1IndexName = randomFrom(remote1Index, IDX_ALIAS, FILTERED_IDX_ALIAS); + String q = Strings.format("FROM nomatch*,%s:%s", REMOTE_CLUSTER_1, remote1IndexName); + try (EsqlQueryResponse resp = runQuery(q, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), greaterThanOrEqualTo(1)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + // local cluster is never marked as SKIPPED even when no matcing indices - just marked as 0 shards searched + new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + new ExpectedCluster( + REMOTE_CLUSTER_1, + remote1IndexName, + EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, + remote1NumShards + ) + ) + ); + } + + String limit0 = q + " | LIMIT 0"; + try (EsqlQueryResponse resp = runQuery(limit0, requestIncludeMeta)) { + assertThat(getValuesList(resp).size(), equalTo(0)); + assertThat(resp.columns().size(), greaterThan(0)); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.isCrossClusterSearch(), is(true)); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertExpectedClustersForMissingIndicesTests( + executionInfo, + List.of( + // local cluster is never marked as SKIPPED even when no matcing indices - just marked as 0 shards searched + new ExpectedCluster(LOCAL_CLUSTER, "nomatch*", EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0), + // LIMIT 0 searches always have total shards = 0 + new ExpectedCluster(REMOTE_CLUSTER_1, remote1IndexName, EsqlExecutionInfo.Cluster.Status.SUCCESSFUL, 0) + ) + ); + } + } + + // query is fatal since cluster-a has skip_unavailable=false and has no matching indices + { + String q = Strings.format("FROM %s,cluster-a:nomatch*", randomFrom(localIndex, IDX_ALIAS, FILTERED_IDX_ALIAS)); + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]")); + } + + // an error is thrown if there are no matching indices at all - single remote cluster with concrete index expression + { + String q = "FROM cluster-a:nomatch"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown 
index [cluster-a:nomatch]")); + } + + // an error is thrown if there are no matching indices at all - single remote cluster with wildcard index expression + { + String q = "FROM cluster-a:nomatch*"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]")); + } + + // an error is thrown if there are no matching indices at all - local with wildcard, remote with concrete + { + String q = "FROM nomatch*,cluster-a:nomatch"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch*]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch*]")); + } + + // an error is thrown if there are no matching indices at all - local with wildcard, remote with wildcard + { + String q = "FROM nomatch*,cluster-a:nomatch*"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch*]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch*]")); + } + + // an error is thrown if there are no matching indices at all - local with concrete, remote with concrete + { + String q = "FROM nomatch,cluster-a:nomatch"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch,nomatch]")); + } + + // an error is thrown if there are no matching indices at all - local with concrete, remote with wildcard + { + String q = "FROM nomatch,cluster-a:nomatch*"; + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch]")); + } + + // Missing concrete index on skip_unavailable=false cluster is a fatal error, even when another index expression + // against that cluster matches + { + String remote2IndexName = randomFrom(remote2Index, IDX_ALIAS, FILTERED_IDX_ALIAS); + String q = Strings.format("FROM %s,cluster-a:nomatch,cluster-a:%s*", localIndex, remote2IndexName); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), 
containsString("no such index [nomatch]")); + + // TODO: in follow on PR, add support for throwing a VerificationException from this scenario + // String limit0 = q + " | LIMIT 0"; + // VerificationException e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + // assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*,nomatch]")); + } + + // --- test against 3 clusters + + // skip_unavailable=false cluster having no matching indices is a fatal error. This error + // is fatal at plan time, so it throws VerificationException, not IndexNotFoundException (thrown at execution time) + { + String localIndexName = randomFrom(localIndex, IDX_ALIAS, FILTERED_IDX_ALIAS); + String remote2IndexName = randomFrom(remote2Index, IDX_ALIAS, FILTERED_IDX_ALIAS); + String q = Strings.format("FROM %s*,cluster-a:nomatch,%s:%s*", localIndexName, REMOTE_CLUSTER_2, remote2IndexName); + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch]")); + } + + // skip_unavailable=false cluster having no matching indices is a fatal error (even if wildcarded) + { + String localIndexName = randomFrom(localIndex, IDX_ALIAS, FILTERED_IDX_ALIAS); + String remote2IndexName = randomFrom(remote2Index, IDX_ALIAS, FILTERED_IDX_ALIAS); + String q = Strings.format("FROM %s*,cluster-a:nomatch*,%s:%s*", localIndexName, REMOTE_CLUSTER_2, remote2IndexName); + VerificationException e = expectThrows(VerificationException.class, () -> runQuery(q, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]")); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(VerificationException.class, () -> runQuery(limit0, requestIncludeMeta)); + assertThat(e.getDetailedMessage(), containsString("Unknown index [cluster-a:nomatch*]")); + } + } finally { + clearSkipUnavailable(); + } + } + + record ExpectedCluster(String clusterAlias, String indexExpression, EsqlExecutionInfo.Cluster.Status status, Integer totalShards) {} + + public void assertExpectedClustersForMissingIndicesTests(EsqlExecutionInfo executionInfo, List expected) { + long overallTookMillis = executionInfo.overallTook().millis(); + assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); + + Set expectedClusterAliases = expected.stream().map(c -> c.clusterAlias()).collect(Collectors.toSet()); + assertThat(executionInfo.clusterAliases(), equalTo(expectedClusterAliases)); + + for (ExpectedCluster expectedCluster : expected) { + EsqlExecutionInfo.Cluster cluster = executionInfo.getCluster(expectedCluster.clusterAlias()); + String msg = cluster.getClusterAlias(); + assertThat(msg, cluster.getIndexExpression(), equalTo(expectedCluster.indexExpression())); + assertThat(msg, cluster.getStatus(), equalTo(expectedCluster.status())); + assertThat(msg, cluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertThat(msg, cluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); + assertThat(msg, cluster.getTotalShards(), equalTo(expectedCluster.totalShards())); + if (cluster.getStatus() == EsqlExecutionInfo.Cluster.Status.SUCCESSFUL) { + assertThat(msg, cluster.getSuccessfulShards(), 
equalTo(expectedCluster.totalShards())); + assertThat(msg, cluster.getSkippedShards(), equalTo(0)); + } else if (cluster.getStatus() == EsqlExecutionInfo.Cluster.Status.SKIPPED) { + assertThat(msg, cluster.getSuccessfulShards(), equalTo(0)); + assertThat(msg, cluster.getSkippedShards(), equalTo(expectedCluster.totalShards())); + assertThat(msg, cluster.getFailures().size(), equalTo(1)); + assertThat(msg, cluster.getFailures().get(0).getCause(), instanceOf(VerificationException.class)); + String expectedMsg = "Unknown index [" + expectedCluster.indexExpression() + "]"; + assertThat(msg, cluster.getFailures().get(0).getCause().getMessage(), containsString(expectedMsg)); + } + // currently the failed shard count is always zero - change this once we start allowing partial data for individual shard failures + assertThat(msg, cluster.getFailedShards(), equalTo(0)); } } @@ -359,6 +898,10 @@ public void testSearchesWhereNonExistentClusterIsSpecifiedWithWildcards() { assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); } + // skip_un must be true for the next test or it will fail on "cluster-a:no_such_index*" with a + // VerificationException because there are no matching indices for that skip_un=false cluster. + setSkipUnavailable(REMOTE_CLUSTER_1, true); + // cluster-foo* matches nothing and so should not be present in the EsqlExecutionInfo try ( EsqlQueryResponse resp = runQuery( @@ -376,9 +919,9 @@ public void testSearchesWhereNonExistentClusterIsSpecifiedWithWildcards() { assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER_1, LOCAL_CLUSTER))); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); assertThat(remoteCluster.getIndexExpression(), equalTo("no_such_index*")); assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); @@ -395,6 +938,8 @@ public void testSearchesWhereNonExistentClusterIsSpecifiedWithWildcards() { assertThat(localCluster.getSuccessfulShards(), equalTo(localNumShards)); assertThat(localCluster.getSkippedShards(), equalTo(0)); assertThat(localCluster.getFailedShards(), equalTo(0)); + } finally { + clearSkipUnavailable(); } } @@ -403,10 +948,12 @@ public void testSearchesWhereNonExistentClusterIsSpecifiedWithWildcards() { * (which involves cross-cluster field-caps calls), it is a coordinator only operation at query time * which uses a different pathway compared to queries that require data node (and remote data node) operations * at query time. + * + * Note: the tests covering "nonmatching indices" also do LIMIT 0 tests. + * This one mostly focuses on took time values. 
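+ * For example, a query like {@code FROM logs*,cluster-a:* | LIMIT 0} returns the merged schema
+ * with zero rows, and each cluster in the execution info reports total shards = 0.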
*/ public void testCCSExecutionOnSearchesWithLimit0() { setupTwoClusters(); - Tuple includeCCSMetadata = randomIncludeCCSMetadata(); Boolean requestIncludeMeta = includeCCSMetadata.v1(); boolean responseExpectMeta = includeCCSMetadata.v2(); @@ -427,9 +974,9 @@ public void testCCSExecutionOnSearchesWithLimit0() { long overallTookMillis = executionInfo.overallTook().millis(); assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER_1, LOCAL_CLUSTER))); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); assertThat(remoteCluster.getIndexExpression(), equalTo("*")); assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); @@ -449,66 +996,6 @@ public void testCCSExecutionOnSearchesWithLimit0() { assertThat(remoteCluster.getSkippedShards(), equalTo(0)); assertThat(remoteCluster.getFailedShards(), equalTo(0)); } - - try (EsqlQueryResponse resp = runQuery("FROM logs*,cluster-a:nomatch* | LIMIT 0", requestIncludeMeta)) { - EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); - assertNotNull(executionInfo); - assertThat(executionInfo.isCrossClusterSearch(), is(true)); - long overallTookMillis = executionInfo.overallTook().millis(); - assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); - assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); - - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); - assertThat(remoteCluster.getIndexExpression(), equalTo("nomatch*")); - assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SKIPPED)); - assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(remoteCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(remoteCluster.getTotalShards(), equalTo(0)); - assertThat(remoteCluster.getSuccessfulShards(), equalTo(0)); - assertThat(remoteCluster.getSkippedShards(), equalTo(0)); - assertThat(remoteCluster.getFailedShards(), equalTo(0)); - - EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER); - assertThat(localCluster.getIndexExpression(), equalTo("logs*")); - assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); - assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(localCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(localCluster.getTotalShards(), equalTo(0)); - assertThat(localCluster.getSuccessfulShards(), equalTo(0)); - assertThat(localCluster.getSkippedShards(), equalTo(0)); - assertThat(localCluster.getFailedShards(), equalTo(0)); - } - - try (EsqlQueryResponse resp = runQuery("FROM nomatch*,cluster-a:* | LIMIT 0", requestIncludeMeta)) { - EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); - assertNotNull(executionInfo); - assertThat(executionInfo.isCrossClusterSearch(), is(true)); - long overallTookMillis = executionInfo.overallTook().millis(); - assertThat(overallTookMillis, greaterThanOrEqualTo(0L)); - 
assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); - assertThat(executionInfo.clusterAliases(), equalTo(Set.of(REMOTE_CLUSTER, LOCAL_CLUSTER))); - - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); - assertThat(remoteCluster.getIndexExpression(), equalTo("*")); - assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); - assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(remoteCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(remoteCluster.getTotalShards(), equalTo(0)); - assertThat(remoteCluster.getSuccessfulShards(), equalTo(0)); - assertThat(remoteCluster.getSkippedShards(), equalTo(0)); - assertThat(remoteCluster.getFailedShards(), equalTo(0)); - - EsqlExecutionInfo.Cluster localCluster = executionInfo.getCluster(LOCAL_CLUSTER); - assertThat(localCluster.getIndexExpression(), equalTo("nomatch*")); - assertThat(localCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); - assertThat(localCluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertThat(localCluster.getTook().millis(), lessThanOrEqualTo(overallTookMillis)); - assertThat(remoteCluster.getTotalShards(), equalTo(0)); - assertThat(remoteCluster.getSuccessfulShards(), equalTo(0)); - assertThat(remoteCluster.getSkippedShards(), equalTo(0)); - assertThat(remoteCluster.getFailedShards(), equalTo(0)); - } } public void testMetadataIndex() { @@ -536,7 +1023,7 @@ public void testMetadataIndex() { assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); assertThat(remoteCluster.getIndexExpression(), equalTo("logs*")); assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); @@ -571,12 +1058,12 @@ public void testProfile() { .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).put("index.routing.rebalance.enable", "none")) .get(); waitForNoInitializingShards(client(LOCAL_CLUSTER), TimeValue.timeValueSeconds(30), "logs-1"); - client(REMOTE_CLUSTER).admin() + client(REMOTE_CLUSTER_1).admin() .indices() .prepareUpdateSettings("logs-2") .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).put("index.routing.rebalance.enable", "none")) .get(); - waitForNoInitializingShards(client(REMOTE_CLUSTER), TimeValue.timeValueSeconds(30), "logs-2"); + waitForNoInitializingShards(client(REMOTE_CLUSTER_1), TimeValue.timeValueSeconds(30), "logs-2"); final int localOnlyProfiles; { EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(); @@ -593,7 +1080,7 @@ public void testProfile() { EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); assertNotNull(executionInfo); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); assertNull(remoteCluster); assertThat(executionInfo.isCrossClusterSearch(), is(false)); assertThat(executionInfo.includeCCSMetadata(), is(false)); @@ -621,7 +1108,7 @@ public void testProfile() { assertThat(executionInfo.includeCCSMetadata(), is(false)); 
assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); assertThat(remoteCluster.getIndexExpression(), equalTo("logs*")); assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); @@ -654,7 +1141,7 @@ public void testProfile() { assertThat(executionInfo.includeCCSMetadata(), is(false)); assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); assertThat(remoteCluster.getIndexExpression(), equalTo("logs*")); assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); @@ -704,7 +1191,7 @@ public void testWarnings() throws Exception { assertThat(executionInfo.includeCCSMetadata(), is(false)); assertThat(executionInfo.overallTook().millis(), greaterThanOrEqualTo(0L)); - EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER); + EsqlExecutionInfo.Cluster remoteCluster = executionInfo.getCluster(REMOTE_CLUSTER_1); assertThat(remoteCluster.getIndexExpression(), equalTo("logs*")); assertThat(remoteCluster.getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); assertThat(remoteCluster.getTook().millis(), greaterThanOrEqualTo(0L)); @@ -792,22 +1279,32 @@ void waitForNoInitializingShards(Client client, TimeValue timeout, String... ind } Map setupTwoClusters() { - String localIndex = "logs-1"; + return setupClusters(2); + } + + Map setupClusters(int numClusters) { + assert numClusters == 2 || numClusters == 3 : "only 2 or 3 clusters supported, got: " + numClusters; int numShardsLocal = randomIntBetween(1, 5); - populateLocalIndices(localIndex, numShardsLocal); + populateLocalIndices(LOCAL_INDEX, numShardsLocal); - String remoteIndex = "logs-2"; int numShardsRemote = randomIntBetween(1, 5); - populateRemoteIndices(remoteIndex, numShardsRemote); + populateRemoteIndices(REMOTE_CLUSTER_1, REMOTE_INDEX, numShardsRemote); Map clusterInfo = new HashMap<>(); clusterInfo.put("local.num_shards", numShardsLocal); - clusterInfo.put("local.index", localIndex); + clusterInfo.put("local.index", LOCAL_INDEX); clusterInfo.put("remote.num_shards", numShardsRemote); - clusterInfo.put("remote.index", remoteIndex); + clusterInfo.put("remote.index", REMOTE_INDEX); + + if (numClusters == 3) { + int numShardsRemote2 = randomIntBetween(1, 5); + populateRemoteIndices(REMOTE_CLUSTER_2, REMOTE_INDEX, numShardsRemote2); + clusterInfo.put("remote2.index", REMOTE_INDEX); + clusterInfo.put("remote2.num_shards", numShardsRemote2); + } - String skipUnavailableKey = Strings.format("cluster.remote.%s.skip_unavailable", REMOTE_CLUSTER); - Setting skipUnavailableSetting = cluster(REMOTE_CLUSTER).clusterService().getClusterSettings().get(skipUnavailableKey); + String skipUnavailableKey = Strings.format("cluster.remote.%s.skip_unavailable", REMOTE_CLUSTER_1); + Setting skipUnavailableSetting = cluster(REMOTE_CLUSTER_1).clusterService().getClusterSettings().get(skipUnavailableKey); boolean skipUnavailable = (boolean) cluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY).clusterService() .getClusterSettings() 
.get(skipUnavailableSetting); @@ -816,6 +1313,98 @@ Map setupTwoClusters() { return clusterInfo; } + /** + * For the local cluster and REMOTE_CLUSTER_1 it creates a standard alias to the index created in populateLocalIndices + * and populateRemoteIndices. It also creates a filtered alias against those indices that looks like: + * PUT /_aliases + * { + * "actions": [ + * { + * "add": { + * "index": "my_index", + * "alias": "my_alias", + * "filter": { + * "terms": { + * "v": [1, 2, 4] + * } + * } + * } + * } + * ] + * } + */ + void createIndexAliases(int numClusters) { + assert numClusters == 2 || numClusters == 3 : "Only 2 or 3 clusters allowed in createIndexAliases"; + + int[] allowed = new int[] { 1, 2, 4 }; + QueryBuilder filterBuilder = new TermsQueryBuilder("v", allowed); + + { + Client localClient = client(LOCAL_CLUSTER); + IndicesAliasesResponse indicesAliasesResponse = localClient.admin() + .indices() + .prepareAliases() + .addAlias(LOCAL_INDEX, IDX_ALIAS) + .addAlias(LOCAL_INDEX, FILTERED_IDX_ALIAS, filterBuilder) + .get(); + assertFalse(indicesAliasesResponse.hasErrors()); + } + { + Client remoteClient = client(REMOTE_CLUSTER_1); + IndicesAliasesResponse indicesAliasesResponse = remoteClient.admin() + .indices() + .prepareAliases() + .addAlias(REMOTE_INDEX, IDX_ALIAS) + .addAlias(REMOTE_INDEX, FILTERED_IDX_ALIAS, filterBuilder) + .get(); + assertFalse(indicesAliasesResponse.hasErrors()); + } + if (numClusters == 3) { + Client remoteClient = client(REMOTE_CLUSTER_2); + IndicesAliasesResponse indicesAliasesResponse = remoteClient.admin() + .indices() + .prepareAliases() + .addAlias(REMOTE_INDEX, IDX_ALIAS) + .addAlias(REMOTE_INDEX, FILTERED_IDX_ALIAS, filterBuilder) + .get(); + assertFalse(indicesAliasesResponse.hasErrors()); + } + } + + Map createEmptyIndicesWithNoMappings(int numClusters) { + assert numClusters == 2 || numClusters == 3 : "Only 2 or 3 clusters supported in createEmptyIndicesWithNoMappings"; + + Map clusterToEmptyIndexMap = new HashMap<>(); + + String localIndexName = randomAlphaOfLength(14).toLowerCase(Locale.ROOT) + "1"; + clusterToEmptyIndexMap.put(LOCAL_CLUSTER, localIndexName); + Client localClient = client(LOCAL_CLUSTER); + assertAcked( + localClient.admin().indices().prepareCreate(localIndexName).setSettings(Settings.builder().put("index.number_of_shards", 1)) + ); + + String remote1IndexName = randomAlphaOfLength(14).toLowerCase(Locale.ROOT) + "2"; + clusterToEmptyIndexMap.put(REMOTE_CLUSTER_1, remote1IndexName); + Client remote1Client = client(REMOTE_CLUSTER_1); + assertAcked( + remote1Client.admin().indices().prepareCreate(remote1IndexName).setSettings(Settings.builder().put("index.number_of_shards", 1)) + ); + + if (numClusters == 3) { + String remote2IndexName = randomAlphaOfLength(14).toLowerCase(Locale.ROOT) + "3"; + clusterToEmptyIndexMap.put(REMOTE_CLUSTER_2, remote2IndexName); + Client remote2Client = client(REMOTE_CLUSTER_2); + assertAcked( + remote2Client.admin() + .indices() + .prepareCreate(remote2IndexName) + .setSettings(Settings.builder().put("index.number_of_shards", 1)) + ); + } + + return clusterToEmptyIndexMap; + } + void populateLocalIndices(String indexName, int numShards) { Client localClient = client(LOCAL_CLUSTER); assertAcked( @@ -831,8 +1420,8 @@ void populateLocalIndices(String indexName, int numShards) { localClient.admin().indices().prepareRefresh(indexName).get(); } - void populateRemoteIndices(String indexName, int numShards) { - Client remoteClient = client(REMOTE_CLUSTER); + void populateRemoteIndices(String clusterAlias, 
String indexName, int numShards) { + Client remoteClient = client(clusterAlias); assertAcked( remoteClient.admin() .indices() @@ -845,4 +1434,23 @@ void populateRemoteIndices(String indexName, int numShards) { } remoteClient.admin().indices().prepareRefresh(indexName).get(); } + + private void setSkipUnavailable(String clusterAlias, boolean skip) { + client(LOCAL_CLUSTER).admin() + .cluster() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put("cluster.remote." + clusterAlias + ".skip_unavailable", skip).build()) + .get(); + } + + private void clearSkipUnavailable() { + Settings.Builder settingsBuilder = Settings.builder() + .putNull("cluster.remote." + REMOTE_CLUSTER_1 + ".skip_unavailable") + .putNull("cluster.remote." + REMOTE_CLUSTER_2 + ".skip_unavailable"); + client(LOCAL_CLUSTER).admin() + .cluster() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(settingsBuilder.build()) + .get(); + } } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java index cde4f10ef556c..1939f81353c0e 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java @@ -79,12 +79,17 @@ public class EsqlActionTaskIT extends AbstractPausableIntegTestCase { private String REDUCE_DESCRIPTION; private boolean nodeLevelReduction; + /** + * Number of docs released by {@link #startEsql}. + */ + private int prereleasedDocs; + @Before public void setup() { assumeTrue("requires query pragmas", canUseQueryPragmas()); nodeLevelReduction = randomBoolean(); READ_DESCRIPTION = """ - \\_LuceneSourceOperator[dataPartitioning = SHARD, maxPageSize = pageSize(), limit = 2147483647] + \\_LuceneSourceOperator[dataPartitioning = SHARD, maxPageSize = pageSize(), limit = 2147483647, scoreMode = COMPLETE_NO_SCORES] \\_ValuesSourceReaderOperator[fields = [pause_me]] \\_AggregationOperator[mode = INITIAL, aggs = sum of longs] \\_ExchangeSinkOperator""".replace("pageSize()", Integer.toString(pageSize())); @@ -104,6 +109,7 @@ public void testTaskContents() throws Exception { ActionFuture response = startEsql(); try { getTasksStarting(); + logger.info("unblocking script"); scriptPermits.release(pageSize()); List foundTasks = getTasksRunning(); int luceneSources = 0; @@ -216,9 +222,15 @@ private ActionFuture startEsql() { return startEsql("from test | stats sum(pause_me)"); } + /** + * Start an ESQL query, releasing a few docs from the {@code pause_me} + * script so it'll actually start but won't finish its first page. + */ private ActionFuture startEsql(String query) { scriptPermits.drainPermits(); - scriptPermits.release(between(1, 5)); + // Allow a few docs to calculate so the query gets "started" + prereleasedDocs = between(1, pageSize() / 2); + scriptPermits.release(prereleasedDocs); var settingsBuilder = Settings.builder() // Force shard partitioning because that's all the tests know how to match. It is easier to reason about too. 
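+ // ("shard" partitioning assigns each shard to a single driver/slice, which keeps the
+ // operator descriptions asserted in READ_DESCRIPTION stable across runs.)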
.put("data_partitioning", "shard") @@ -380,12 +392,13 @@ protected void doRun() throws Exception { .get(); ensureYellowAndNoInitializingShards("test"); request.query("FROM test | LIMIT 10"); - request.pragmas(randomPragmas()); + QueryPragmas pragmas = randomPragmas(); + request.pragmas(pragmas); PlainActionFuture future = new PlainActionFuture<>(); client.execute(EsqlQueryAction.INSTANCE, request, future); ExchangeService exchangeService = internalCluster().getInstance(ExchangeService.class, dataNode); - boolean waitedForPages; - final String sessionId; + final boolean waitedForPages; + final String exchangeId; try { List foundTasks = new ArrayList<>(); assertBusy(() -> { @@ -399,12 +412,22 @@ protected void doRun() throws Exception { assertThat(tasks, hasSize(1)); foundTasks.addAll(tasks); }); - sessionId = foundTasks.get(0).taskId().toString(); + final String sessionId = foundTasks.get(0).taskId().toString(); assertTrue(fetchingStarted.await(1, TimeUnit.MINUTES)); - ExchangeSinkHandler exchangeSink = exchangeService.getSinkHandler(sessionId); + List sinkKeys = exchangeService.sinkKeys() + .stream() + .filter( + s -> s.startsWith(sessionId) + // exclude the node-level reduction sink + && s.endsWith("[n]") == false + ) + .toList(); + assertThat(sinkKeys.toString(), sinkKeys.size(), equalTo(1)); + exchangeId = sinkKeys.get(0); + ExchangeSinkHandler exchangeSink = exchangeService.getSinkHandler(exchangeId); waitedForPages = randomBoolean(); if (waitedForPages) { - // do not fail exchange requests until we have some pages + // do not fail exchange requests until we have some pages. assertBusy(() -> assertThat(exchangeSink.bufferSize(), greaterThan(0))); } } finally { @@ -416,7 +439,7 @@ protected void doRun() throws Exception { // As a result, the exchange sinks on data-nodes won't be removed until the inactive_timeout elapses, which is // longer than the assertBusy timeout. 
if (waitedForPages == false) { - exchangeService.finishSinkHandler(sessionId, failure); + exchangeService.finishSinkHandler(exchangeId, failure); } } finally { transportService.clearAllRules(); @@ -425,6 +448,7 @@ protected void doRun() throws Exception { public void testTaskContentsForTopNQuery() throws Exception { READ_DESCRIPTION = ("\\_LuceneTopNSourceOperator[dataPartitioning = SHARD, maxPageSize = pageSize(), limit = 1000, " + + "scoreMode = TOP_DOCS, " + "sorts = [{\"pause_me\":{\"order\":\"asc\",\"missing\":\"_last\",\"unmapped_type\":\"long\"}}]]\n" + "\\_ValuesSourceReaderOperator[fields = [pause_me]]\n" + "\\_ProjectOperator[projection = [1]]\n" @@ -444,6 +468,7 @@ public void testTaskContentsForTopNQuery() throws Exception { ActionFuture<EsqlQueryResponse> response = startEsql("from test | sort pause_me | keep pause_me"); try { getTasksStarting(); + logger.info("unblocking script"); scriptPermits.release(pageSize()); getTasksRunning(); } finally { @@ -455,11 +480,10 @@ public void testTaskContentsForTopNQuery() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/107293") public void testTaskContentsForLimitQuery() throws Exception { String limit = Integer.toString(randomIntBetween(pageSize() + 1, 2 * numberOfDocs())); READ_DESCRIPTION = """ - \\_LuceneSourceOperator[dataPartitioning = SHARD, maxPageSize = pageSize(), limit = limit()] + \\_LuceneSourceOperator[dataPartitioning = SHARD, maxPageSize = pageSize(), limit = limit(), scoreMode = COMPLETE_NO_SCORES] \\_ValuesSourceReaderOperator[fields = [pause_me]] \\_ProjectOperator[projection = [1]] \\_ExchangeSinkOperator""".replace("pageSize()", Integer.toString(pageSize())).replace("limit()", limit); @@ -475,7 +499,8 @@ public void testTaskContentsForLimitQuery() throws Exception { ActionFuture<EsqlQueryResponse> response = startEsql("from test | keep pause_me | limit " + limit); try { getTasksStarting(); - scriptPermits.release(pageSize()); + logger.info("unblocking script"); + scriptPermits.release(pageSize() - prereleasedDocs); getTasksRunning(); } finally { scriptPermits.release(numberOfDocs()); @@ -487,7 +512,7 @@ public void testTaskContentsForLimitQuery() throws Exception { public void testTaskContentsForGroupingStatsQuery() throws Exception { READ_DESCRIPTION = """ - \\_LuceneSourceOperator[dataPartitioning = SHARD, maxPageSize = pageSize(), limit = 2147483647] + \\_LuceneSourceOperator[dataPartitioning = SHARD, maxPageSize = pageSize(), limit = 2147483647, scoreMode = COMPLETE_NO_SCORES] \\_ValuesSourceReaderOperator[fields = [foo]] \\_OrdinalsGroupingOperator(aggs = max of longs) \\_ExchangeSinkOperator""".replace("pageSize()", Integer.toString(pageSize())); @@ -504,6 +529,7 @@ public void testTaskContentsForGroupingStatsQuery() throws Exception { ActionFuture<EsqlQueryResponse> response = startEsql("from test | stats max(foo) by pause_me"); try { getTasksStarting(); + logger.info("unblocking script"); scriptPermits.release(pageSize()); getTasksRunning(); } finally { diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlNodeFailureIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlNodeFailureIT.java new file mode 100644 index 0000000000000..3a69983a0d86e --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlNodeFailureIT.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.index.mapper.OnScriptError; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.script.LongFieldScript; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.hamcrest.Matchers.equalTo; + +/** + * Make sure the failures on the data node come back as failures over the wire. + */ +@ESIntegTestCase.ClusterScope(minNumDataNodes = 2) +public class EsqlNodeFailureIT extends AbstractEsqlIntegTestCase { + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), FailingFieldPlugin.class); + } + + /** + * Use a runtime field that fails when loading field values to fail the entire query. + */ + public void testFailureLoadingFields() throws IOException { + XContentBuilder mapping = JsonXContent.contentBuilder().startObject(); + mapping.startObject("runtime"); + { + mapping.startObject("fail_me"); + { + mapping.field("type", "long"); + mapping.startObject("script").field("source", "").field("lang", "fail").endObject(); + } + mapping.endObject(); + } + mapping.endObject(); + client().admin().indices().prepareCreate("fail").setSettings(indexSettings(1, 0)).setMapping(mapping.endObject()).get(); + + int docCount = 100; + List<IndexRequestBuilder> docs = new ArrayList<>(docCount); + for (int d = 0; d < docCount; d++) { + docs.add(client().prepareIndex("ok").setSource("foo", d)); + } + docs.add(client().prepareIndex("fail").setSource("foo", 0)); + indexRandom(true, docs); + + ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> run("FROM fail,ok | LIMIT 100").close()); + assertThat(e.getMessage(), equalTo("test failure")); + } + + public static class FailingFieldPlugin extends Plugin implements ScriptPlugin { + + @Override + public ScriptEngine getScriptEngine(Settings settings, Collection<ScriptContext<?>> contexts) { + return new ScriptEngine() { + @Override + public String getType() { + return "fail"; + } + + @Override + @SuppressWarnings("unchecked") + public <FactoryType> FactoryType compile( + String name, + String code, + ScriptContext<FactoryType> context, + Map<String, String> params + ) { + return (FactoryType) new LongFieldScript.Factory() { + @Override + public LongFieldScript.LeafFactory newFactory( + String fieldName, + Map<String, Object> params, + SearchLookup searchLookup, + OnScriptError onScriptError + ) { + return ctx -> new LongFieldScript(fieldName, params, searchLookup, onScriptError, ctx) { + @Override + public void execute() { + throw new ElasticsearchException("test failure"); + } + }; + } + }; + } + + @Override + public Set<ScriptContext<?>> getSupportedContexts() { + return Set.of(LongFieldScript.CONTEXT); + } + }; + } + } +} diff --git
a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/LookupFromIndexIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/LookupFromIndexIT.java index cff9604053903..3b9359fe66d40 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/LookupFromIndexIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/LookupFromIndexIT.java @@ -148,7 +148,8 @@ public void testLookupIndex() throws IOException { DataPartitioning.SEGMENT, 1, 10000, - DocIdSetIterator.NO_MORE_DOCS + DocIdSetIterator.NO_MORE_DOCS, + false // no scoring ); ValuesSourceReaderOperator.Factory reader = new ValuesSourceReaderOperator.Factory( List.of( @@ -183,7 +184,8 @@ public void testLookupIndex() throws IOException { DataType.KEYWORD, "lookup", "data", - List.of(new Alias(Source.EMPTY, "l", new ReferenceAttribute(Source.EMPTY, "l", DataType.LONG))) + List.of(new Alias(Source.EMPTY, "l", new ReferenceAttribute(Source.EMPTY, "l", DataType.LONG))), + Source.EMPTY ); DriverContext driverContext = driverContext(); try ( diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/ManyShardsIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/ManyShardsIT.java index 1ce92ded8acc6..c52e1b538972b 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/ManyShardsIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/ManyShardsIT.java @@ -14,9 +14,13 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.compute.operator.exchange.ExchangeService; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.MockSearchService; @@ -26,6 +30,7 @@ import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.esql.plugin.ComputeService; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import org.hamcrest.Matchers; import org.junit.Before; @@ -56,6 +61,18 @@ protected Collection> getMockPlugins() { return plugins; } + @Override + protected Collection> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), InternalExchangePlugin.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder() + .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(3000, 5000))) + .build(); + } + @Before public void setupIndices() { int numIndices = between(10, 20); @@ -113,32 +130,64 @@ public void testConcurrentQueries() throws Exception { } public void testRejection() throws Exception { - String[] nodes = internalCluster().getNodeNames(); - for (String node : nodes) { - MockTransportService ts = (MockTransportService) 
internalCluster().getInstance(TransportService.class, node); - ts.addRequestHandlingBehavior(ExchangeService.EXCHANGE_ACTION_NAME, (handler, request, channel, task) -> { - handler.messageReceived(request, new TransportChannel() { - @Override - public String getProfileName() { - return channel.getProfileName(); - } - - @Override - public void sendResponse(TransportResponse response) { - channel.sendResponse(new RemoteTransportException("simulated", new EsRejectedExecutionException("test queue"))); - } - - @Override - public void sendResponse(Exception exception) { - channel.sendResponse(exception); - } - }, task); + DiscoveryNode dataNode = randomFrom(internalCluster().clusterService().state().nodes().getDataNodes().values()); + String indexName = "single-node-index"; + client().admin() + .indices() + .prepareCreate(indexName) + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.routing.allocation.require._name", dataNode.getName()) + ) + .setMapping("user", "type=keyword", "tags", "type=keyword") + .get(); + client().prepareIndex(indexName) + .setSource("user", "u1", "tags", "lucene") + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + + MockTransportService ts = (MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getName()); + CountDownLatch dataNodeRequestLatch = new CountDownLatch(1); + ts.addRequestHandlingBehavior(ComputeService.DATA_ACTION_NAME, (handler, request, channel, task) -> { + handler.messageReceived(request, channel, task); + dataNodeRequestLatch.countDown(); + }); + + ts.addRequestHandlingBehavior(ExchangeService.EXCHANGE_ACTION_NAME, (handler, request, channel, task) -> { + ts.getThreadPool().generic().execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + channel.sendResponse(e); + } + + @Override + protected void doRun() throws Exception { + assertTrue(dataNodeRequestLatch.await(30, TimeUnit.SECONDS)); + handler.messageReceived(request, new TransportChannel() { + @Override + public String getProfileName() { + return channel.getProfileName(); + } + + @Override + public void sendResponse(TransportResponse response) { + channel.sendResponse(new RemoteTransportException("simulated", new EsRejectedExecutionException("test queue"))); + } + + @Override + public void sendResponse(Exception exception) { + channel.sendResponse(exception); + } + }, task); + } }); - } + }); + try { AtomicReference failure = new AtomicReference<>(); EsqlQueryRequest request = new EsqlQueryRequest(); - request.query("from test-* | stats count(user) by tags"); + request.query("from single-node-index | stats count(user) by tags"); request.acceptedPragmaRisks(true); request.pragmas(randomPragmas()); CountDownLatch queryLatch = new CountDownLatch(1); @@ -151,9 +200,7 @@ public void sendResponse(Exception exception) { assertThat(ExceptionsHelper.status(failure.get()), equalTo(RestStatus.TOO_MANY_REQUESTS)); assertThat(failure.get().getMessage(), equalTo("test queue")); } finally { - for (String node : nodes) { - ((MockTransportService) internalCluster().getInstance(TransportService.class, node)).clearAllRules(); - } + ts.clearAllRules(); } } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/KqlFunctionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/KqlFunctionIT.java new file mode 100644 index 0000000000000..d58637ab52c86 --- /dev/null +++ 
b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/KqlFunctionIT.java @@ -0,0 +1,144 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.plugin; + +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryShardException; +import org.elasticsearch.xpack.esql.VerificationException; +import org.elasticsearch.xpack.esql.action.AbstractEsqlIntegTestCase; +import org.elasticsearch.xpack.esql.action.EsqlCapabilities; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.util.List; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.CoreMatchers.containsString; + +public class KqlFunctionIT extends AbstractEsqlIntegTestCase { + + @BeforeClass + protected static void ensureKqlFunctionEnabled() { + assumeTrue("kql function capability not available", EsqlCapabilities.Cap.KQL_FUNCTION.isEnabled()); + } + + @Before + public void setupIndex() { + createAndPopulateIndex(); + } + + public void testSimpleKqlQuery() { + var query = """ + FROM test + | WHERE kql("content: dog") + | KEEP id + | SORT id + """; + + try (var resp = run(query)) { + assertColumnNames(resp.columns(), List.of("id")); + assertColumnTypes(resp.columns(), List.of("integer")); + assertValues(resp.values(), List.of(List.of(1), List.of(3), List.of(4), List.of(5))); + } + } + + public void testMultiFieldKqlQuery() { + var query = """ + FROM test + | WHERE kql("dog OR canine") + | KEEP id + """; + + try (var resp = run(query)) { + assertColumnNames(resp.columns(), List.of("id")); + assertColumnTypes(resp.columns(), List.of("integer")); + assertValuesInAnyOrder(resp.values(), List.of(List.of(1), List.of(2), List.of(3), List.of(4), List.of(5))); + } + } + + public void testKqlQueryWithinEval() { + var query = """ + FROM test + | EVAL matches_query = kql("title: fox") + """; + + var error = expectThrows(VerificationException.class, () -> run(query)); + assertThat(error.getMessage(), containsString("[KQL] function is only supported in WHERE commands")); + } + + public void testInvalidKqlQueryEof() { + var query = """ + FROM test + | WHERE kql("content: ((((dog") + """; + + var error = expectThrows(QueryShardException.class, () -> run(query)); + assertThat(error.getMessage(), containsString("Failed to parse KQL query [content: ((((dog]")); + assertThat(error.getRootCause().getMessage(), containsString("line 1:11: mismatched input '('")); + } + + public void testInvalidKqlQueryLexicalError() { + var query = """ + FROM test + | WHERE kql(":") + """; + + var error = expectThrows(QueryShardException.class, () -> run(query)); + assertThat(error.getMessage(), containsString("Failed to parse KQL query [:]")); + assertThat(error.getRootCause().getMessage(), containsString("line 1:1: extraneous input ':' ")); + } + + private void createAndPopulateIndex() { + var indexName = "test"; + var client = client().admin().indices(); + var createRequest = client.prepareCreate(indexName) + .setSettings(Settings.builder().put("index.number_of_shards", 1)) + .setMapping("id", "type=integer", "content", "type=text"); + assertAcked(createRequest); +
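// Index five docs used by the queries above; the IMMEDIATE refresh policy makes them searchable right away. +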
client().prepareBulk() + .add( + new IndexRequest(indexName).id("1") + .source("id", 1, "content", "The quick brown animal swiftly jumps over a lazy dog", "title", "A Swift Fox's Journey") + ) + .add( + new IndexRequest(indexName).id("2") + .source("id", 2, "content", "A speedy brown fox hops effortlessly over a sluggish canine", "title", "The Fox's Leap") + ) + .add( + new IndexRequest(indexName).id("3") + .source("id", 3, "content", "Quick and nimble, the fox vaults over the lazy dog", "title", "Brown Fox in Action") + ) + .add( + new IndexRequest(indexName).id("4") + .source( + "id", + 4, + "content", + "A fox that is quick and brown jumps over a dog that is quite lazy", + "title", + "Speedy Animals" + ) + ) + .add( + new IndexRequest(indexName).id("5") + .source( + "id", + 5, + "content", + "With agility, a quick brown fox bounds over a slow-moving dog", + "title", + "Foxes and Canines" + ) + ) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + ensureYellow(indexName); + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchFunctionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchFunctionIT.java new file mode 100644 index 0000000000000..99f7d48a0d636 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchFunctionIT.java @@ -0,0 +1,299 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.plugin; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.esql.VerificationException; +import org.elasticsearch.xpack.esql.action.AbstractEsqlIntegTestCase; +import org.elasticsearch.xpack.esql.action.EsqlCapabilities; +import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; +import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; +import org.junit.Before; + +import java.util.List; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.CoreMatchers.containsString; + +//@TestLogging(value = "org.elasticsearch.xpack.esql:TRACE,org.elasticsearch.compute:TRACE", reason = "debug") +public class MatchFunctionIT extends AbstractEsqlIntegTestCase { + + @Before + public void setupIndex() { + createAndPopulateIndex(); + } + + @Override + protected EsqlQueryResponse run(EsqlQueryRequest request) { + assumeTrue("match function capability not available", EsqlCapabilities.Cap.MATCH_FUNCTION.isEnabled()); + return super.run(request); + } + + public void testSimpleWhereMatch() { + var query = """ + FROM test + | WHERE match(content, "fox") + | KEEP id + | SORT id + """; + + try (var resp = run(query)) { + assertColumnNames(resp.columns(), List.of("id")); + assertColumnTypes(resp.columns(), List.of("integer")); + assertValues(resp.values(), List.of(List.of(1), List.of(6))); + } + } + + public void testCombinedWhereMatch() { + var query = """ + FROM test + | WHERE match(content, "fox") AND id > 5 + | KEEP id + | SORT id + """; + + try (var resp = run(query)) { + assertColumnNames(resp.columns(), List.of("id")); + 
assertColumnTypes(resp.columns(), List.of("integer")); + assertValues(resp.values(), List.of(List.of(6))); + } + } + + public void testMultipleMatch() { + var query = """ + FROM test + | WHERE match(content, "fox") AND match(content, "brown") + | KEEP id + | SORT id + """; + + try (var resp = run(query)) { + assertColumnNames(resp.columns(), List.of("id")); + assertColumnTypes(resp.columns(), List.of("integer")); + assertValues(resp.values(), List.of(List.of(1), List.of(6))); + } + } + + public void testMultipleWhereMatch() { + var query = """ + FROM test + | WHERE match(content, "fox") AND match(content, "brown") + | EVAL summary = CONCAT("document with id: ", to_str(id), "and content: ", content) + | SORT summary + | LIMIT 4 + | WHERE match(content, "brown fox") + | KEEP id + """; + + var error = expectThrows(ElasticsearchException.class, () -> run(query)); + assertThat(error.getMessage(), containsString("[MATCH] function cannot be used after LIMIT")); + } + + public void testNotWhereMatch() { + var query = """ + FROM test + | WHERE NOT match(content, "brown fox") + | KEEP id + | SORT id + """; + + try (var resp = run(query)) { + assertColumnNames(resp.columns(), List.of("id")); + assertColumnTypes(resp.columns(), List.of("integer")); + assertValues(resp.values(), List.of(List.of(5))); + } + } + + public void testWhereMatchWithScoring() { + assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); + var query = """ + FROM test + METADATA _score + | WHERE match(content, "fox") + | KEEP id, _score + | SORT id ASC + """; + + try (var resp = run(query)) { + assertColumnNames(resp.columns(), List.of("id", "_score")); + assertColumnTypes(resp.columns(), List.of("integer", "double")); + assertValues(resp.values(), List.of(List.of(1, 1.156558871269226), List.of(6, 0.9114001989364624))); + } + } + + public void testWhereMatchWithScoringDifferentSort() { + assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); + var query = """ + FROM test + METADATA _score + | WHERE match(content, "fox") + | KEEP id, _score + | SORT id DESC + """; + + try (var resp = run(query)) { + assertColumnNames(resp.columns(), List.of("id", "_score")); + assertColumnTypes(resp.columns(), List.of("integer", "double")); + assertValues(resp.values(), List.of(List.of(6, 0.9114001989364624), List.of(1, 1.156558871269226))); + } + } + + public void testWhereMatchWithScoringSortScore() { + assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); + var query = """ + FROM test + METADATA _score + | WHERE match(content, "fox") + | KEEP id, _score + | SORT _score DESC + """; + + try (var resp = run(query)) { + assertColumnNames(resp.columns(), List.of("id", "_score")); + assertColumnTypes(resp.columns(), List.of("integer", "double")); + assertValues(resp.values(), List.of(List.of(1, 1.156558871269226), List.of(6, 0.9114001989364624))); + } + } + + public void testWhereMatchWithScoringNoSort() { + assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); + var query = """ + FROM test + METADATA _score + | WHERE content:"fox" + | KEEP id, _score + """; + + try (var resp = run(query)) { + assertColumnNames(resp.columns(), List.of("id", "_score")); + assertColumnTypes(resp.columns(), List.of("integer", "double")); + assertValuesInAnyOrder(resp.values(), List.of(List.of(1, 1.156558871269226), List.of(6, 0.9114001989364624))); + } + } + + public void testNonExistingColumn() { + var query = """ + 
FROM test + | WHERE something:"fox" + """; + + var error = expectThrows(VerificationException.class, () -> run(query)); + assertThat(error.getMessage(), containsString("Unknown column [something]")); + } + + public void testWhereMatchEvalColumn() { + var query = """ + FROM test + | EVAL upper_content = to_upper(content) + | WHERE upper_content:"FOX" + | KEEP id + """; + + var error = expectThrows(VerificationException.class, () -> run(query)); + assertThat( + error.getMessage(), + containsString("[:] operator cannot operate on [upper_content], which is not a field from an index mapping") + ); + } + + public void testWhereMatchOverWrittenColumn() { + var query = """ + FROM test + | DROP content + | EVAL content = CONCAT("document with ID ", to_str(id)) + | WHERE content:"document" + """; + + var error = expectThrows(VerificationException.class, () -> run(query)); + assertThat( + error.getMessage(), + containsString("[:] operator cannot operate on [content], which is not a field from an index mapping") + ); + } + + public void testWhereMatchAfterStats() { + var query = """ + FROM test + | STATS count(*) + | WHERE content:"fox" + """; + + var error = expectThrows(VerificationException.class, () -> run(query)); + assertThat(error.getMessage(), containsString("Unknown column [content]")); + } + + public void testWhereMatchWithFunctions() { + var query = """ + FROM test + | WHERE content:"fox" OR to_upper(content) == "FOX" + """; + var error = expectThrows(ElasticsearchException.class, () -> run(query)); + assertThat( + error.getMessage(), + containsString( + "Invalid condition [content:\"fox\" OR to_upper(content) == \"FOX\"]. " + + "[:] operator can't be used as part of an or condition" + ) + ); + } + + public void testWhereMatchWithRow() { + var query = """ + ROW content = "a brown fox" + | WHERE content:"fox" + """; + + var error = expectThrows(ElasticsearchException.class, () -> run(query)); + assertThat( + error.getMessage(), + containsString("[:] operator cannot operate on [\"a brown fox\"], which is not a field from an index mapping") + ); + } + + public void testMatchWithinEval() { + var query = """ + FROM test + | EVAL matches_query = content:"fox" + """; + + var error = expectThrows(VerificationException.class, () -> run(query)); + assertThat(error.getMessage(), containsString("[:] operator is only supported in WHERE commands")); + } + + public void testMatchWithNonTextField() { + var query = """ + FROM test + | WHERE id:"fox" + """; + + var error = expectThrows(VerificationException.class, () -> run(query)); + assertThat(error.getMessage(), containsString("first argument of [id:\"fox\"] must be [string], found value [id] type [integer]")); + } + + private void createAndPopulateIndex() { + var indexName = "test"; + var client = client().admin().indices(); + var createRequest = client.prepareCreate(indexName) + .setSettings(Settings.builder().put("index.number_of_shards", 1)) + .setMapping("id", "type=integer", "content", "type=text"); + assertAcked(createRequest); + client().prepareBulk() + .add(new IndexRequest(indexName).id("1").source("id", 1, "content", "This is a brown fox")) + .add(new IndexRequest(indexName).id("2").source("id", 2, "content", "This is a brown dog")) + .add(new IndexRequest(indexName).id("3").source("id", 3, "content", "This dog is really brown")) + .add(new IndexRequest(indexName).id("4").source("id", 4, "content", "The dog is brown but this document is very very long")) + .add(new IndexRequest(indexName).id("5").source("id", 5, "content", "There is also a
white cat")) + .add(new IndexRequest(indexName).id("6").source("id", 6, "content", "The quick brown fox jumps over the lazy dog")) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + ensureYellow(indexName); + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchOperatorIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchOperatorIT.java index b86c46fd3fa7a..6a360eb319abb 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchOperatorIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/MatchOperatorIT.java @@ -15,8 +15,6 @@ import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.action.AbstractEsqlIntegTestCase; import org.elasticsearch.xpack.esql.action.EsqlCapabilities; -import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; -import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; import org.junit.Before; import java.util.List; @@ -32,12 +30,6 @@ public void setupIndex() { createAndPopulateIndex(); } - @Override - protected EsqlQueryResponse run(EsqlQueryRequest request) { - assumeTrue("match operator capability not available", EsqlCapabilities.Cap.MATCH_OPERATOR_COLON.isEnabled()); - return super.run(request); - } - public void testSimpleWhereMatch() { var query = """ FROM test @@ -114,6 +106,56 @@ public void testNotWhereMatch() { } } + public void testWhereMatchWithScoring() { + assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); + var query = """ + FROM test + METADATA _score + | WHERE content:"fox" + | KEEP id, _score + | SORT id ASC + """; + + try (var resp = run(query)) { + assertColumnNames(resp.columns(), List.of("id", "_score")); + assertColumnTypes(resp.columns(), List.of("integer", "double")); + assertValues(resp.values(), List.of(List.of(1, 1.156558871269226), List.of(6, 0.9114001989364624))); + } + } + + public void testWhereMatchWithScoringDifferentSort() { + assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); + var query = """ + FROM test + METADATA _score + | WHERE content:"fox" + | KEEP id, _score + | SORT id + """; + + try (var resp = run(query)) { + assertColumnNames(resp.columns(), List.of("id", "_score")); + assertColumnTypes(resp.columns(), List.of("integer", "double")); + assertValues(resp.values(), List.of(List.of(1, 1.156558871269226), List.of(6, 0.9114001989364624))); + } + } + + public void testWhereMatchWithScoringNoSort() { + assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); + var query = """ + FROM test + METADATA _score + | WHERE content:"fox" + | KEEP id, _score + """; + + try (var resp = run(query)) { + assertColumnNames(resp.columns(), List.of("id", "_score")); + assertColumnTypes(resp.columns(), List.of("integer", "double")); + assertValuesInAnyOrder(resp.values(), List.of(List.of(1, 1.156558871269226), List.of(6, 0.9114001989364624))); + } + } + public void testNonExistingColumn() { var query = """ FROM test diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/QueryStringIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/QueryStringIT.java index 03af16d29e9b4..a3d1ac931528c 100644 --- 
a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/QueryStringIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/QueryStringIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.action.AbstractEsqlIntegTestCase; +import org.elasticsearch.xpack.esql.action.EsqlCapabilities; import org.junit.Before; import java.util.List; @@ -137,4 +138,99 @@ private void createAndPopulateIndex() { .get(); ensureYellow(indexName); } + + public void testWhereQstrWithScoring() { + assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); + var query = """ + FROM test + METADATA _score + | WHERE qstr("content: fox") + | KEEP id, _score + """; + + try (var resp = run(query)) { + assertColumnNames(resp.columns(), List.of("id", "_score")); + assertColumnTypes(resp.columns(), List.of("integer", "double")); + assertValuesInAnyOrder( + resp.values(), + List.of( + List.of(2, 0.3028995096683502), + List.of(3, 0.3028995096683502), + List.of(4, 0.2547692656517029), + List.of(5, 0.28161853551864624) + ) + ); + + } + } + + public void testWhereQstrWithScoringSorted() { + assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); + var query = """ + FROM test + METADATA _score + | WHERE qstr("content:fox fox") + | KEEP id, _score + | SORT _score DESC + """; + + try (var resp = run(query)) { + assertColumnNames(resp.columns(), List.of("id", "_score")); + assertColumnTypes(resp.columns(), List.of("integer", "double")); + assertValues( + resp.values(), + List.of( + List.of(3, 1.5605685710906982), + List.of(2, 0.6057990193367004), + List.of(5, 0.5632370710372925), + List.of(4, 0.5095385313034058) + ) + ); + + } + } + + public void testWhereQstrWithScoringNoSort() { + assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); + var query = """ + FROM test + METADATA _score + | WHERE qstr("content: fox") + | KEEP id, _score + """; + + try (var resp = run(query)) { + assertColumnNames(resp.columns(), List.of("id", "_score")); + assertColumnTypes(resp.columns(), List.of("integer", "double")); + assertValuesInAnyOrder( + resp.values(), + List.of( + List.of(2, 0.3028995096683502), + List.of(3, 0.3028995096683502), + List.of(4, 0.2547692656517029), + List.of(5, 0.28161853551864624) + ) + ); + } + } + + public void testWhereQstrWithNonPushableAndScoring() { + assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); + var query = """ + FROM test + METADATA _score + | WHERE qstr("content: fox") + AND abs(id) > 0 + | EVAL c_score = ceil(_score) + | KEEP id, c_score + | SORT id DESC + | LIMIT 2 + """; + + try (var resp = run(query)) { + assertColumnNames(resp.columns(), List.of("id", "c_score")); + assertColumnTypes(resp.columns(), List.of("integer", "double")); + assertValuesInAnyOrder(resp.values(), List.of(List.of(5, 1.0), List.of(4, 1.0))); + } + } } diff --git a/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.g4 b/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.g4 index 6ec93d203d984..ef875d7ca01d8 100644 --- a/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.g4 +++ b/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.g4 @@ -85,8 +85,15 @@ WHERE : 'where' -> pushMode(EXPRESSION_MODE); // main section while preserving alphabetical order: // MYCOMMAND : 'mycommand' -> ... 
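// The {this.isDevVersion()}? semantic predicate gates these DEV_ tokens so they are only lexed in snapshot builds.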
DEV_INLINESTATS : {this.isDevVersion()}? 'inlinestats' -> pushMode(EXPRESSION_MODE); -DEV_LOOKUP : {this.isDevVersion()}? 'lookup' -> pushMode(LOOKUP_MODE); +DEV_LOOKUP : {this.isDevVersion()}? 'lookup_🐔' -> pushMode(LOOKUP_MODE); DEV_METRICS : {this.isDevVersion()}? 'metrics' -> pushMode(METRICS_MODE); +// list of all JOIN commands +DEV_JOIN : {this.isDevVersion()}? 'join' -> pushMode(JOIN_MODE); +DEV_JOIN_FULL : {this.isDevVersion()}? 'full' -> pushMode(JOIN_MODE); +DEV_JOIN_LEFT : {this.isDevVersion()}? 'left' -> pushMode(JOIN_MODE); +DEV_JOIN_RIGHT : {this.isDevVersion()}? 'right' -> pushMode(JOIN_MODE); +DEV_JOIN_LOOKUP : {this.isDevVersion()}? 'lookup' -> pushMode(JOIN_MODE); + // // Catch-all for unrecognized commands - don't define any beyond this line @@ -105,8 +112,6 @@ WS : [ \r\n\t]+ -> channel(HIDDEN) ; -COLON : ':'; - // // Expression - used by most command // @@ -177,6 +182,7 @@ AND : 'and'; ASC : 'asc'; ASSIGN : '='; CAST_OP : '::'; +COLON : ':'; COMMA : ','; DESC : 'desc'; DOT : '.'; @@ -209,7 +215,6 @@ MINUS : '-'; ASTERISK : '*'; SLASH : '/'; PERCENT : '%'; -EXPRESSION_COLON : {this.isDevVersion()}? COLON -> type(COLON); NESTED_WHERE : WHERE -> type(WHERE); @@ -543,6 +548,31 @@ LOOKUP_FIELD_WS : WS -> channel(HIDDEN) ; +// +// JOIN-related commands +// +mode JOIN_MODE; +JOIN_PIPE : PIPE -> type(PIPE), popMode; +JOIN_JOIN : DEV_JOIN -> type(DEV_JOIN); +JOIN_AS : AS -> type(AS); +JOIN_ON : ON -> type(ON), popMode, pushMode(EXPRESSION_MODE); +USING : 'USING' -> popMode, pushMode(EXPRESSION_MODE); + +JOIN_UNQUOTED_IDENTIFER: UNQUOTED_IDENTIFIER -> type(UNQUOTED_IDENTIFIER); +JOIN_QUOTED_IDENTIFIER : QUOTED_IDENTIFIER -> type(QUOTED_IDENTIFIER); + +JOIN_LINE_COMMENT + : LINE_COMMENT -> channel(HIDDEN) + ; + +JOIN_MULTILINE_COMMENT + : MULTILINE_COMMENT -> channel(HIDDEN) + ; + +JOIN_WS + : WS -> channel(HIDDEN) + ; + // // METRICS command // diff --git a/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.tokens b/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.tokens index 3dd1a2c754038..b1a16987dd8ce 100644 --- a/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.tokens +++ b/x-pack/plugin/esql/src/main/antlr/EsqlBaseLexer.tokens @@ -17,106 +17,115 @@ WHERE=16 DEV_INLINESTATS=17 DEV_LOOKUP=18 DEV_METRICS=19 -UNKNOWN_CMD=20 -LINE_COMMENT=21 -MULTILINE_COMMENT=22 -WS=23 -COLON=24 -PIPE=25 -QUOTED_STRING=26 -INTEGER_LITERAL=27 -DECIMAL_LITERAL=28 -BY=29 -AND=30 -ASC=31 -ASSIGN=32 -CAST_OP=33 -COMMA=34 -DESC=35 -DOT=36 -FALSE=37 -FIRST=38 -IN=39 -IS=40 -LAST=41 -LIKE=42 -LP=43 -NOT=44 -NULL=45 -NULLS=46 -OR=47 -PARAM=48 -RLIKE=49 -RP=50 -TRUE=51 -EQ=52 -CIEQ=53 -NEQ=54 -LT=55 -LTE=56 -GT=57 -GTE=58 -PLUS=59 -MINUS=60 -ASTERISK=61 -SLASH=62 -PERCENT=63 -NAMED_OR_POSITIONAL_PARAM=64 -OPENING_BRACKET=65 -CLOSING_BRACKET=66 -UNQUOTED_IDENTIFIER=67 -QUOTED_IDENTIFIER=68 -EXPR_LINE_COMMENT=69 -EXPR_MULTILINE_COMMENT=70 -EXPR_WS=71 -EXPLAIN_WS=72 -EXPLAIN_LINE_COMMENT=73 -EXPLAIN_MULTILINE_COMMENT=74 -METADATA=75 -UNQUOTED_SOURCE=76 -FROM_LINE_COMMENT=77 -FROM_MULTILINE_COMMENT=78 -FROM_WS=79 -ID_PATTERN=80 -PROJECT_LINE_COMMENT=81 -PROJECT_MULTILINE_COMMENT=82 -PROJECT_WS=83 -AS=84 -RENAME_LINE_COMMENT=85 -RENAME_MULTILINE_COMMENT=86 -RENAME_WS=87 -ON=88 -WITH=89 -ENRICH_POLICY_NAME=90 -ENRICH_LINE_COMMENT=91 -ENRICH_MULTILINE_COMMENT=92 -ENRICH_WS=93 -ENRICH_FIELD_LINE_COMMENT=94 -ENRICH_FIELD_MULTILINE_COMMENT=95 -ENRICH_FIELD_WS=96 -MVEXPAND_LINE_COMMENT=97 -MVEXPAND_MULTILINE_COMMENT=98 -MVEXPAND_WS=99 -INFO=100 -SHOW_LINE_COMMENT=101 -SHOW_MULTILINE_COMMENT=102 -SHOW_WS=103 
-SETTING=104 -SETTING_LINE_COMMENT=105 -SETTTING_MULTILINE_COMMENT=106 -SETTING_WS=107 -LOOKUP_LINE_COMMENT=108 -LOOKUP_MULTILINE_COMMENT=109 -LOOKUP_WS=110 -LOOKUP_FIELD_LINE_COMMENT=111 -LOOKUP_FIELD_MULTILINE_COMMENT=112 -LOOKUP_FIELD_WS=113 -METRICS_LINE_COMMENT=114 -METRICS_MULTILINE_COMMENT=115 -METRICS_WS=116 -CLOSING_METRICS_LINE_COMMENT=117 -CLOSING_METRICS_MULTILINE_COMMENT=118 -CLOSING_METRICS_WS=119 +DEV_JOIN=20 +DEV_JOIN_FULL=21 +DEV_JOIN_LEFT=22 +DEV_JOIN_RIGHT=23 +DEV_JOIN_LOOKUP=24 +UNKNOWN_CMD=25 +LINE_COMMENT=26 +MULTILINE_COMMENT=27 +WS=28 +PIPE=29 +QUOTED_STRING=30 +INTEGER_LITERAL=31 +DECIMAL_LITERAL=32 +BY=33 +AND=34 +ASC=35 +ASSIGN=36 +CAST_OP=37 +COLON=38 +COMMA=39 +DESC=40 +DOT=41 +FALSE=42 +FIRST=43 +IN=44 +IS=45 +LAST=46 +LIKE=47 +LP=48 +NOT=49 +NULL=50 +NULLS=51 +OR=52 +PARAM=53 +RLIKE=54 +RP=55 +TRUE=56 +EQ=57 +CIEQ=58 +NEQ=59 +LT=60 +LTE=61 +GT=62 +GTE=63 +PLUS=64 +MINUS=65 +ASTERISK=66 +SLASH=67 +PERCENT=68 +NAMED_OR_POSITIONAL_PARAM=69 +OPENING_BRACKET=70 +CLOSING_BRACKET=71 +UNQUOTED_IDENTIFIER=72 +QUOTED_IDENTIFIER=73 +EXPR_LINE_COMMENT=74 +EXPR_MULTILINE_COMMENT=75 +EXPR_WS=76 +EXPLAIN_WS=77 +EXPLAIN_LINE_COMMENT=78 +EXPLAIN_MULTILINE_COMMENT=79 +METADATA=80 +UNQUOTED_SOURCE=81 +FROM_LINE_COMMENT=82 +FROM_MULTILINE_COMMENT=83 +FROM_WS=84 +ID_PATTERN=85 +PROJECT_LINE_COMMENT=86 +PROJECT_MULTILINE_COMMENT=87 +PROJECT_WS=88 +AS=89 +RENAME_LINE_COMMENT=90 +RENAME_MULTILINE_COMMENT=91 +RENAME_WS=92 +ON=93 +WITH=94 +ENRICH_POLICY_NAME=95 +ENRICH_LINE_COMMENT=96 +ENRICH_MULTILINE_COMMENT=97 +ENRICH_WS=98 +ENRICH_FIELD_LINE_COMMENT=99 +ENRICH_FIELD_MULTILINE_COMMENT=100 +ENRICH_FIELD_WS=101 +MVEXPAND_LINE_COMMENT=102 +MVEXPAND_MULTILINE_COMMENT=103 +MVEXPAND_WS=104 +INFO=105 +SHOW_LINE_COMMENT=106 +SHOW_MULTILINE_COMMENT=107 +SHOW_WS=108 +SETTING=109 +SETTING_LINE_COMMENT=110 +SETTTING_MULTILINE_COMMENT=111 +SETTING_WS=112 +LOOKUP_LINE_COMMENT=113 +LOOKUP_MULTILINE_COMMENT=114 +LOOKUP_WS=115 +LOOKUP_FIELD_LINE_COMMENT=116 +LOOKUP_FIELD_MULTILINE_COMMENT=117 +LOOKUP_FIELD_WS=118 +USING=119 +JOIN_LINE_COMMENT=120 +JOIN_MULTILINE_COMMENT=121 +JOIN_WS=122 +METRICS_LINE_COMMENT=123 +METRICS_MULTILINE_COMMENT=124 +METRICS_WS=125 +CLOSING_METRICS_LINE_COMMENT=126 +CLOSING_METRICS_MULTILINE_COMMENT=127 +CLOSING_METRICS_WS=128 'dissect'=1 'drop'=2 'enrich'=3 @@ -133,46 +142,47 @@ CLOSING_METRICS_WS=119 'sort'=14 'stats'=15 'where'=16 -':'=24 -'|'=25 -'by'=29 -'and'=30 -'asc'=31 -'='=32 -'::'=33 -','=34 -'desc'=35 -'.'=36 -'false'=37 -'first'=38 -'in'=39 -'is'=40 -'last'=41 -'like'=42 -'('=43 -'not'=44 -'null'=45 -'nulls'=46 -'or'=47 -'?'=48 -'rlike'=49 -')'=50 -'true'=51 -'=='=52 -'=~'=53 -'!='=54 -'<'=55 -'<='=56 -'>'=57 -'>='=58 -'+'=59 -'-'=60 -'*'=61 -'/'=62 -'%'=63 -']'=66 -'metadata'=75 -'as'=84 -'on'=88 -'with'=89 -'info'=100 +'|'=29 +'by'=33 +'and'=34 +'asc'=35 +'='=36 +'::'=37 +':'=38 +','=39 +'desc'=40 +'.'=41 +'false'=42 +'first'=43 +'in'=44 +'is'=45 +'last'=46 +'like'=47 +'('=48 +'not'=49 +'null'=50 +'nulls'=51 +'or'=52 +'?'=53 +'rlike'=54 +')'=55 +'true'=56 +'=='=57 +'=~'=58 +'!='=59 +'<'=60 +'<='=61 +'>'=62 +'>='=63 +'+'=64 +'-'=65 +'*'=66 +'/'=67 +'%'=68 +']'=71 +'metadata'=80 +'as'=89 +'on'=93 +'with'=94 +'info'=105 +'USING'=119 diff --git a/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.g4 b/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.g4 index 67f194a1bff64..f84cfe3060503 100644 --- a/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.g4 +++ b/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.g4 @@ -54,6 +54,7 @@ processingCommand // in development | 
{this.isDevVersion()}? inlinestatsCommand | {this.isDevVersion()}? lookupCommand + | {this.isDevVersion()}? joinCommand ; whereCommand @@ -68,7 +69,7 @@ booleanExpression | left=booleanExpression operator=OR right=booleanExpression #logicalBinary | valueExpression (NOT)? IN LP valueExpression (COMMA valueExpression)* RP #logicalIn | valueExpression IS NOT? NULL #isNull - | {this.isDevVersion()}? matchBooleanExpression #matchExpression + | matchBooleanExpression #matchExpression ; regexBooleanExpression @@ -322,3 +323,19 @@ lookupCommand inlinestatsCommand : DEV_INLINESTATS stats=aggFields (BY grouping=fields)? ; + +joinCommand + : type=(DEV_JOIN_LOOKUP | DEV_JOIN_LEFT | DEV_JOIN_RIGHT)? DEV_JOIN joinTarget joinCondition + ; + +joinTarget + : index=identifier (AS alias=identifier)? + ; + +joinCondition + : ON joinPredicate (COMMA joinPredicate)* + ; + +joinPredicate + : valueExpression + ; diff --git a/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.tokens b/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.tokens index 3dd1a2c754038..b1a16987dd8ce 100644 --- a/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.tokens +++ b/x-pack/plugin/esql/src/main/antlr/EsqlBaseParser.tokens @@ -17,106 +17,115 @@ WHERE=16 DEV_INLINESTATS=17 DEV_LOOKUP=18 DEV_METRICS=19 -UNKNOWN_CMD=20 -LINE_COMMENT=21 -MULTILINE_COMMENT=22 -WS=23 -COLON=24 -PIPE=25 -QUOTED_STRING=26 -INTEGER_LITERAL=27 -DECIMAL_LITERAL=28 -BY=29 -AND=30 -ASC=31 -ASSIGN=32 -CAST_OP=33 -COMMA=34 -DESC=35 -DOT=36 -FALSE=37 -FIRST=38 -IN=39 -IS=40 -LAST=41 -LIKE=42 -LP=43 -NOT=44 -NULL=45 -NULLS=46 -OR=47 -PARAM=48 -RLIKE=49 -RP=50 -TRUE=51 -EQ=52 -CIEQ=53 -NEQ=54 -LT=55 -LTE=56 -GT=57 -GTE=58 -PLUS=59 -MINUS=60 -ASTERISK=61 -SLASH=62 -PERCENT=63 -NAMED_OR_POSITIONAL_PARAM=64 -OPENING_BRACKET=65 -CLOSING_BRACKET=66 -UNQUOTED_IDENTIFIER=67 -QUOTED_IDENTIFIER=68 -EXPR_LINE_COMMENT=69 -EXPR_MULTILINE_COMMENT=70 -EXPR_WS=71 -EXPLAIN_WS=72 -EXPLAIN_LINE_COMMENT=73 -EXPLAIN_MULTILINE_COMMENT=74 -METADATA=75 -UNQUOTED_SOURCE=76 -FROM_LINE_COMMENT=77 -FROM_MULTILINE_COMMENT=78 -FROM_WS=79 -ID_PATTERN=80 -PROJECT_LINE_COMMENT=81 -PROJECT_MULTILINE_COMMENT=82 -PROJECT_WS=83 -AS=84 -RENAME_LINE_COMMENT=85 -RENAME_MULTILINE_COMMENT=86 -RENAME_WS=87 -ON=88 -WITH=89 -ENRICH_POLICY_NAME=90 -ENRICH_LINE_COMMENT=91 -ENRICH_MULTILINE_COMMENT=92 -ENRICH_WS=93 -ENRICH_FIELD_LINE_COMMENT=94 -ENRICH_FIELD_MULTILINE_COMMENT=95 -ENRICH_FIELD_WS=96 -MVEXPAND_LINE_COMMENT=97 -MVEXPAND_MULTILINE_COMMENT=98 -MVEXPAND_WS=99 -INFO=100 -SHOW_LINE_COMMENT=101 -SHOW_MULTILINE_COMMENT=102 -SHOW_WS=103 -SETTING=104 -SETTING_LINE_COMMENT=105 -SETTTING_MULTILINE_COMMENT=106 -SETTING_WS=107 -LOOKUP_LINE_COMMENT=108 -LOOKUP_MULTILINE_COMMENT=109 -LOOKUP_WS=110 -LOOKUP_FIELD_LINE_COMMENT=111 -LOOKUP_FIELD_MULTILINE_COMMENT=112 -LOOKUP_FIELD_WS=113 -METRICS_LINE_COMMENT=114 -METRICS_MULTILINE_COMMENT=115 -METRICS_WS=116 -CLOSING_METRICS_LINE_COMMENT=117 -CLOSING_METRICS_MULTILINE_COMMENT=118 -CLOSING_METRICS_WS=119 +DEV_JOIN=20 +DEV_JOIN_FULL=21 +DEV_JOIN_LEFT=22 +DEV_JOIN_RIGHT=23 +DEV_JOIN_LOOKUP=24 +UNKNOWN_CMD=25 +LINE_COMMENT=26 +MULTILINE_COMMENT=27 +WS=28 +PIPE=29 +QUOTED_STRING=30 +INTEGER_LITERAL=31 +DECIMAL_LITERAL=32 +BY=33 +AND=34 +ASC=35 +ASSIGN=36 +CAST_OP=37 +COLON=38 +COMMA=39 +DESC=40 +DOT=41 +FALSE=42 +FIRST=43 +IN=44 +IS=45 +LAST=46 +LIKE=47 +LP=48 +NOT=49 +NULL=50 +NULLS=51 +OR=52 +PARAM=53 +RLIKE=54 +RP=55 +TRUE=56 +EQ=57 +CIEQ=58 +NEQ=59 +LT=60 +LTE=61 +GT=62 +GTE=63 +PLUS=64 +MINUS=65 +ASTERISK=66 +SLASH=67 +PERCENT=68 +NAMED_OR_POSITIONAL_PARAM=69 
+OPENING_BRACKET=70 +CLOSING_BRACKET=71 +UNQUOTED_IDENTIFIER=72 +QUOTED_IDENTIFIER=73 +EXPR_LINE_COMMENT=74 +EXPR_MULTILINE_COMMENT=75 +EXPR_WS=76 +EXPLAIN_WS=77 +EXPLAIN_LINE_COMMENT=78 +EXPLAIN_MULTILINE_COMMENT=79 +METADATA=80 +UNQUOTED_SOURCE=81 +FROM_LINE_COMMENT=82 +FROM_MULTILINE_COMMENT=83 +FROM_WS=84 +ID_PATTERN=85 +PROJECT_LINE_COMMENT=86 +PROJECT_MULTILINE_COMMENT=87 +PROJECT_WS=88 +AS=89 +RENAME_LINE_COMMENT=90 +RENAME_MULTILINE_COMMENT=91 +RENAME_WS=92 +ON=93 +WITH=94 +ENRICH_POLICY_NAME=95 +ENRICH_LINE_COMMENT=96 +ENRICH_MULTILINE_COMMENT=97 +ENRICH_WS=98 +ENRICH_FIELD_LINE_COMMENT=99 +ENRICH_FIELD_MULTILINE_COMMENT=100 +ENRICH_FIELD_WS=101 +MVEXPAND_LINE_COMMENT=102 +MVEXPAND_MULTILINE_COMMENT=103 +MVEXPAND_WS=104 +INFO=105 +SHOW_LINE_COMMENT=106 +SHOW_MULTILINE_COMMENT=107 +SHOW_WS=108 +SETTING=109 +SETTING_LINE_COMMENT=110 +SETTTING_MULTILINE_COMMENT=111 +SETTING_WS=112 +LOOKUP_LINE_COMMENT=113 +LOOKUP_MULTILINE_COMMENT=114 +LOOKUP_WS=115 +LOOKUP_FIELD_LINE_COMMENT=116 +LOOKUP_FIELD_MULTILINE_COMMENT=117 +LOOKUP_FIELD_WS=118 +USING=119 +JOIN_LINE_COMMENT=120 +JOIN_MULTILINE_COMMENT=121 +JOIN_WS=122 +METRICS_LINE_COMMENT=123 +METRICS_MULTILINE_COMMENT=124 +METRICS_WS=125 +CLOSING_METRICS_LINE_COMMENT=126 +CLOSING_METRICS_MULTILINE_COMMENT=127 +CLOSING_METRICS_WS=128 'dissect'=1 'drop'=2 'enrich'=3 @@ -133,46 +142,47 @@ CLOSING_METRICS_WS=119 'sort'=14 'stats'=15 'where'=16 -':'=24 -'|'=25 -'by'=29 -'and'=30 -'asc'=31 -'='=32 -'::'=33 -','=34 -'desc'=35 -'.'=36 -'false'=37 -'first'=38 -'in'=39 -'is'=40 -'last'=41 -'like'=42 -'('=43 -'not'=44 -'null'=45 -'nulls'=46 -'or'=47 -'?'=48 -'rlike'=49 -')'=50 -'true'=51 -'=='=52 -'=~'=53 -'!='=54 -'<'=55 -'<='=56 -'>'=57 -'>='=58 -'+'=59 -'-'=60 -'*'=61 -'/'=62 -'%'=63 -']'=66 -'metadata'=75 -'as'=84 -'on'=88 -'with'=89 -'info'=100 +'|'=29 +'by'=33 +'and'=34 +'asc'=35 +'='=36 +'::'=37 +':'=38 +','=39 +'desc'=40 +'.'=41 +'false'=42 +'first'=43 +'in'=44 +'is'=45 +'last'=46 +'like'=47 +'('=48 +'not'=49 +'null'=50 +'nulls'=51 +'or'=52 +'?'=53 +'rlike'=54 +')'=55 +'true'=56 +'=='=57 +'=~'=58 +'!='=59 +'<'=60 +'<='=61 +'>'=62 +'>='=63 +'+'=64 +'-'=65 +'*'=66 +'/'=67 +'%'=68 +']'=71 +'metadata'=80 +'as'=89 +'on'=93 +'with'=94 +'info'=105 +'USING'=119 diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/grouping/CategorizeEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/grouping/CategorizeEvaluator.java deleted file mode 100644 index c6349907f9b4b..0000000000000 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/grouping/CategorizeEvaluator.java +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License -// 2.0; you may not use this file except in compliance with the Elastic License -// 2.0. 
-package org.elasticsearch.xpack.esql.expression.function.grouping; - -import java.lang.IllegalArgumentException; -import java.lang.Override; -import java.lang.String; -import java.util.function.Function; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.BytesRefVector; -import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.IntVector; -import org.elasticsearch.compute.data.Page; -import org.elasticsearch.compute.operator.DriverContext; -import org.elasticsearch.compute.operator.EvalOperator; -import org.elasticsearch.compute.operator.Warnings; -import org.elasticsearch.core.Releasables; -import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.ml.aggs.categorization.TokenListCategorizer; -import org.elasticsearch.xpack.ml.job.categorization.CategorizationAnalyzer; - -/** - * {@link EvalOperator.ExpressionEvaluator} implementation for {@link Categorize}. - * This class is generated. Do not edit it. - */ -public final class CategorizeEvaluator implements EvalOperator.ExpressionEvaluator { - private final Source source; - - private final EvalOperator.ExpressionEvaluator v; - - private final CategorizationAnalyzer analyzer; - - private final TokenListCategorizer.CloseableTokenListCategorizer categorizer; - - private final DriverContext driverContext; - - private Warnings warnings; - - public CategorizeEvaluator(Source source, EvalOperator.ExpressionEvaluator v, - CategorizationAnalyzer analyzer, - TokenListCategorizer.CloseableTokenListCategorizer categorizer, DriverContext driverContext) { - this.source = source; - this.v = v; - this.analyzer = analyzer; - this.categorizer = categorizer; - this.driverContext = driverContext; - } - - @Override - public Block eval(Page page) { - try (BytesRefBlock vBlock = (BytesRefBlock) v.eval(page)) { - BytesRefVector vVector = vBlock.asVector(); - if (vVector == null) { - return eval(page.getPositionCount(), vBlock); - } - return eval(page.getPositionCount(), vVector).asBlock(); - } - } - - public IntBlock eval(int positionCount, BytesRefBlock vBlock) { - try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { - BytesRef vScratch = new BytesRef(); - position: for (int p = 0; p < positionCount; p++) { - if (vBlock.isNull(p)) { - result.appendNull(); - continue position; - } - if (vBlock.getValueCount(p) != 1) { - if (vBlock.getValueCount(p) > 1) { - warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value")); - } - result.appendNull(); - continue position; - } - result.appendInt(Categorize.process(vBlock.getBytesRef(vBlock.getFirstValueIndex(p), vScratch), this.analyzer, this.categorizer)); - } - return result.build(); - } - } - - public IntVector eval(int positionCount, BytesRefVector vVector) { - try(IntVector.FixedBuilder result = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) { - BytesRef vScratch = new BytesRef(); - position: for (int p = 0; p < positionCount; p++) { - result.appendInt(p, Categorize.process(vVector.getBytesRef(p, vScratch), this.analyzer, this.categorizer)); - } - return result.build(); - } - } - - @Override - public String toString() { - return "CategorizeEvaluator[" + "v=" + v + "]"; - } - - @Override - public void close() { - Releasables.closeExpectNoException(v, analyzer, categorizer); - } - - private Warnings warnings() { - if 
(warnings == null) { - this.warnings = Warnings.createWarnings( - driverContext.warningsMode(), - source.source().getLineNumber(), - source.source().getColumnNumber(), - source.text() - ); - } - return warnings; - } - - static class Factory implements EvalOperator.ExpressionEvaluator.Factory { - private final Source source; - - private final EvalOperator.ExpressionEvaluator.Factory v; - - private final Function analyzer; - - private final Function categorizer; - - public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory v, - Function analyzer, - Function categorizer) { - this.source = source; - this.v = v; - this.analyzer = analyzer; - this.categorizer = categorizer; - } - - @Override - public CategorizeEvaluator get(DriverContext context) { - return new CategorizeEvaluator(source, v.get(context), analyzer.apply(context), categorizer.apply(context), context); - } - - @Override - public String toString() { - return "CategorizeEvaluator[" + "v=" + v + "]"; - } - } -} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 0d6af0ec3bbb1..dc3329a906741 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -140,6 +140,12 @@ public enum Cap { */ CASE_MV, + /** + * Support for loading values over enrich. This is supported by all versions of ESQL but not + * the unit test CsvTests. + */ + ENRICH_LOAD, + /** * Optimization for ST_CENTROID changed some results in cartesian data. #108713 */ @@ -278,6 +284,11 @@ public enum Cap { */ RANGEQUERY_FOR_DATETIME, + /** + * Enforce strict type checking on ENRICH range types, and warnings for KEYWORD parsing at runtime. Done in #115091. + */ + ENRICH_STRICT_RANGE_TYPES, + /** * Fix for non-unique attribute names in ROW and logical plans. * https://github.com/elastic/elasticsearch/issues/110541 @@ -302,7 +313,7 @@ public enum Cap { /** * Support for match operator as a colon. Previous support for match operator as MATCH has been removed */ - MATCH_OPERATOR_COLON(Build.current().isSnapshot()), + MATCH_OPERATOR_COLON, /** * Removing support for the {@code META} keyword. @@ -317,32 +328,32 @@ public enum Cap { /** * Support for nanosecond dates as a data type */ - DATE_NANOS_TYPE(EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG), + DATE_NANOS_TYPE(), /** * Support for to_date_nanos function */ - TO_DATE_NANOS(EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG), + TO_DATE_NANOS(), /** * Support for date nanos type in binary comparisons */ - DATE_NANOS_BINARY_COMPARISON(EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG), + DATE_NANOS_BINARY_COMPARISON(), /** * Support Least and Greatest functions on Date Nanos type */ - LEAST_GREATEST_FOR_DATENANOS(EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG), + LEAST_GREATEST_FOR_DATENANOS(), /** * Support for date_trunc function on date nanos type */ - DATE_TRUNC_DATE_NANOS(EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG), + DATE_TRUNC_DATE_NANOS(), /** * support aggregations on date nanos */ - DATE_NANOS_AGGREGATIONS(EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG), + DATE_NANOS_AGGREGATIONS(), /** * Support for datetime in least and greatest functions @@ -392,7 +403,7 @@ public enum Cap { /** * Supported the text categorization function "CATEGORIZE". 
*/ - CATEGORIZE(Build.current().isSnapshot()), + CATEGORIZE_V4(Build.current().isSnapshot()), /** * QSTR function @@ -404,6 +415,11 @@ public enum Cap { */ MATCH_FUNCTION, + /** + * KQL function + */ + KQL_FUNCTION(Build.current().isSnapshot()), + /** * Don't optimize CASE IS NOT NULL function by not requiring the fields to be not null as well. * https://github.com/elastic/elasticsearch/issues/112704 @@ -420,6 +436,12 @@ public enum Cap { */ SORTING_ON_SOURCE_AND_COUNTERS_FORBIDDEN, + /** + * Fix {@code SORT} when the {@code _source} field is not a sort key but + * is being returned. + */ + SORT_RETURNING_SOURCE_OK, + /** * Allow filter per individual aggregation. */ @@ -430,6 +452,11 @@ public enum Cap { */ PER_AGG_FILTERING_ORDS, + /** + * Support for {@code STD_DEV} aggregation. + */ + STD_DEV, + /** * Fix for https://github.com/elastic/elasticsearch/issues/114714 */ @@ -476,12 +503,45 @@ public enum Cap { ADD_LIMIT_INSIDE_MV_EXPAND, DELAY_DEBUG_FN(Build.current().isSnapshot()), + + /** Capability for remote metadata test */ + METADATA_FIELDS_REMOTE_TEST(false), /** * WIP on Join planning * - Introduce BinaryPlan and co * - Refactor INLINESTATS and LOOKUP as a JOIN block */ - JOIN_PLANNING_V1(Build.current().isSnapshot()); + JOIN_PLANNING_V1(Build.current().isSnapshot()), + + /** + * Support implicit casting from string literal to DATE_PERIOD or TIME_DURATION. + */ + IMPLICIT_CASTING_STRING_LITERAL_TO_TEMPORAL_AMOUNT, + + /** + * LOOKUP JOIN + */ + JOIN_LOOKUP_V3(Build.current().isSnapshot()), + + /** + * Fix for https://github.com/elastic/elasticsearch/issues/117054 + */ + FIX_NESTED_FIELDS_NAME_CLASH_IN_INDEXRESOLVER, + + /** + * support for aggregations on semantic_text + */ + SEMANTIC_TEXT_AGGREGATIONS(EsqlCorePlugin.SEMANTIC_TEXT_FEATURE_FLAG), + + /** + * Fix for https://github.com/elastic/elasticsearch/issues/114714, again + */ + FIX_STATS_BY_FOLDABLE_EXPRESSION_2, + + /** + * Support the "METADATA _score" directive to enable _score column. + */ + METADATA_SCORE(Build.current().isSnapshot()); private final boolean enabled; @@ -527,9 +587,6 @@ public static Set capabilities(boolean all) { for (NodeFeature feature : new EsqlFeatures().getFeatures()) { caps.add(cap(feature)); } - for (NodeFeature feature : new EsqlFeatures().getHistoricalFeatures().keySet()) { - caps.add(cap(feature)); - } return Set.copyOf(caps); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java index 80bb2afe57122..ba7a7e8266845 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java @@ -169,6 +169,17 @@ public TimeValue overallTook() { return overallTook; } + /** + * How much time the query took since starting. 
+ */ + public TimeValue tookSoFar() { + if (relativeStartNanos == null) { + return new TimeValue(0); + } else { + return new TimeValue(System.nanoTime() - relativeStartNanos, TimeUnit.NANOSECONDS); + } + } + public Set clusterAliases() { return clusterInfo.keySet(); } @@ -478,7 +489,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws { builder.field(STATUS_FIELD.getPreferredName(), getStatus().toString()); builder.field(INDICES_FIELD.getPreferredName(), indexExpression); - if (took != null) { + if (took != null && status != Status.RUNNING) { builder.field(TOOK.getPreferredName(), took.millis()); } if (totalShards != null) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java index 4e59d5419fe6f..77aed298baea5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java @@ -196,8 +196,11 @@ public Iterator toXContentChunked(ToXContent.Params params } b.field("is_running", isRunning); } - if (executionInfo != null && executionInfo.overallTook() != null) { - b.field("took", executionInfo.overallTook().millis()); + if (executionInfo != null) { + long tookInMillis = executionInfo.overallTook() == null + ? executionInfo.tookSoFar().millis() + : executionInfo.overallTook().millis(); + b.field("took", tookInMillis); } if (dropNullColumns) { b.append(ResponseXContentUtils.allColumns(columns, "all_columns")) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryTask.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryTask.java index b12cf4eb354bf..f896a25317102 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryTask.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryTask.java @@ -17,6 +17,8 @@ public class EsqlQueryTask extends StoredAsyncTask { + private EsqlExecutionInfo executionInfo; + public EsqlQueryTask( long id, String type, @@ -29,10 +31,19 @@ public EsqlQueryTask( TimeValue keepAlive ) { super(id, type, action, description, parentTaskId, headers, originHeaders, asyncExecutionId, keepAlive); + this.executionInfo = null; + } + + public void setExecutionInfo(EsqlExecutionInfo executionInfo) { + this.executionInfo = executionInfo; + } + + public EsqlExecutionInfo executionInfo() { + return executionInfo; } @Override public EsqlQueryResponse getCurrentResult() { - return new EsqlQueryResponse(List.of(), List.of(), null, false, getExecutionId().getEncoded(), true, true, null); + return new EsqlQueryResponse(List.of(), List.of(), null, false, getExecutionId().getEncoded(), true, true, executionInfo); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 9c173795d0ab1..b847508d2b161 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -8,8 +8,8 @@ package org.elasticsearch.xpack.esql.analysis; import org.elasticsearch.common.logging.HeaderWarning; -import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.compute.data.Block; +import 
org.elasticsearch.index.IndexMode; import org.elasticsearch.logging.Logger; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.esql.Column; @@ -31,7 +31,6 @@ import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; import org.elasticsearch.xpack.esql.core.expression.UnresolvedStar; -import org.elasticsearch.xpack.esql.core.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.esql.core.expression.predicate.BinaryOperator; import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -49,6 +48,7 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionDefinition; import org.elasticsearch.xpack.esql.expression.function.UnresolvedFunction; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; +import org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.FoldablesConvertFunction; @@ -61,6 +61,8 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.esql.index.EsIndex; +import org.elasticsearch.xpack.esql.index.IndexResolution; +import org.elasticsearch.xpack.esql.parser.ParsingException; import org.elasticsearch.xpack.esql.plan.TableIdentifier; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Drop; @@ -75,6 +77,12 @@ import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.Rename; import org.elasticsearch.xpack.esql.plan.logical.UnresolvedRelation; +import org.elasticsearch.xpack.esql.plan.logical.join.Join; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinConfig; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes.UsingJoinType; +import org.elasticsearch.xpack.esql.plan.logical.join.LookupJoin; import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; @@ -86,6 +94,8 @@ import org.elasticsearch.xpack.esql.stats.FeatureMetric; import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; +import java.time.Duration; +import java.time.temporal.TemporalAmount; import java.util.ArrayList; import java.util.Arrays; import java.util.BitSet; @@ -102,11 +112,13 @@ import java.util.function.Predicate; import java.util.stream.Collectors; +import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.elasticsearch.common.logging.LoggerMessageFormat.format; import static org.elasticsearch.xpack.core.enrich.EnrichPolicy.GEO_MATCH_TYPE; import static org.elasticsearch.xpack.esql.core.type.DataType.BOOLEAN; import static org.elasticsearch.xpack.esql.core.type.DataType.DATETIME; +import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_PERIOD; 
import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; import static org.elasticsearch.xpack.esql.core.type.DataType.FLOAT; import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; @@ -116,9 +128,11 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; +import static org.elasticsearch.xpack.esql.core.type.DataType.TIME_DURATION; import static org.elasticsearch.xpack.esql.core.type.DataType.VERSION; import static org.elasticsearch.xpack.esql.core.type.DataType.isTemporalAmount; import static org.elasticsearch.xpack.esql.stats.FeatureMetric.LIMIT; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.maybeParseTemporalAmount; /** * This class is part of the planner. Resolves references (such as variable and index names) and performs implicit casting. */ @@ -142,9 +156,14 @@ public class Analyzer extends ParameterizedRuleExecutor( "Resolution", + /* + * ImplicitCasting must be before ResolveRefs. Because a reference is created for a Bucket in Aggregate's aggregates, + * resolving this reference before implicit casting may cause this reference to have customMessage=true, which prevents + * further attempts to resolve it. + */ + new ImplicitCasting(), new ResolveRefs(), - new ResolveUnionTypes(), // Must be after ResolveRefs, so union types can be found - new ImplicitCasting() + new ResolveUnionTypes() // Must be after ResolveRefs, so union types can be found ); var finish = new Batch<>("Finish Analysis", Limiter.ONCE, new AddImplicitLimit(), new UnionTypesCleanup()); rules = List.of(init, resolution, finish); @@ -179,8 +198,12 @@ private static class ResolveTable extends ParameterizedAnalyzerRule maybeResolveAttribute(ua, childrenOutput)); } @@ -578,6 +604,71 @@ private LogicalPlan resolveLookup(Lookup l, List childrenOutput) { return l; } + private Join resolveLookupJoin(LookupJoin join) { + JoinConfig config = join.config(); + // for now, support only (LEFT) USING clauses + JoinType type = config.type(); + // rewrite the join into an equi-join on fields with the same name on the left and right sides + if (type instanceof UsingJoinType using) { + List cols = using.columns(); + // the lookup cannot be resolved; bail out + if (Expressions.anyMatch(cols, c -> c instanceof UnresolvedAttribute ua && ua.customMessage())) { + return join; + } + + JoinType coreJoin = using.coreJoin(); + // verify the join type + if (coreJoin != JoinTypes.LEFT) { + String name = cols.get(0).name(); + UnresolvedAttribute errorAttribute = new UnresolvedAttribute( + join.source(), + name, + "Only LEFT join is supported with USING" + ); + return join.withConfig(new JoinConfig(type, singletonList(errorAttribute), emptyList(), emptyList())); + } + // resolve the using columns against the left and the right side, then assemble the new join config + List leftKeys = resolveUsingColumns(cols, join.left().output(), "left"); + List rightKeys = resolveUsingColumns(cols, join.right().output(), "right"); + + config = new JoinConfig(coreJoin, leftKeys, leftKeys, rightKeys); + join = new LookupJoin(join.source(), join.left(), join.right(), config); + } + // everything else is unsupported for now + else { + UnresolvedAttribute errorAttribute = new UnresolvedAttribute(join.source(), "unsupported", "Unsupported join type"); + // add error message + return join.withConfig(new JoinConfig(type, singletonList(errorAttribute),
emptyList(), emptyList())); } return join; } + + private List resolveUsingColumns(List cols, List output, String side) { + List resolved = new ArrayList<>(cols.size()); + for (Attribute col : cols) { + if (col instanceof UnresolvedAttribute ua) { + Attribute resolvedCol = maybeResolveAttribute(ua, output); + if (resolvedCol instanceof UnresolvedAttribute ucol) { + String message = ua.unresolvedMessage(); + String match = "column [" + ucol.name() + "]"; + resolvedCol = ucol.withUnresolvedMessage(message.replace(match, match + " in " + side + " side of join")); + } + resolved.add(resolvedCol); + } + // columns are expected to be unresolved; if that's not the case, return an error + else { + return singletonList( + new UnresolvedAttribute( + col.source(), + col.name(), + "Surprised to discover column [" + col.name() + "] already resolved" + ) + ); + } + } + return resolved; + } + private Attribute maybeResolveAttribute(UnresolvedAttribute ua, List childrenOutput) { return maybeResolveAttribute(ua, childrenOutput, log); } @@ -952,13 +1043,15 @@ private BitSet gatherPreAnalysisMetrics(LogicalPlan plan, BitSet b) { } /** - * Cast string literals in ScalarFunction, EsqlArithmeticOperation, BinaryComparison and In to desired data types. + * Cast string literals in ScalarFunction, EsqlArithmeticOperation, BinaryComparison, In and GroupingFunction to desired data types. * For example, the string literals in the following expressions will be cast implicitly to the field data type on the left hand side. * date > "2024-08-21" * date in ("2024-08-21", "2024-08-22", "2024-08-23") * date = "2024-08-21" + 3 days * ip == "127.0.0.1" * version != "1.0" + * bucket(dateField, "1 month") + * date_trunc("1 minute", dateField) * * If the inputs to Coalesce are mixed numeric types, cast the rest of the numeric field or value to the first numeric data type if * applicable.
For example, implicit casting converts: @@ -972,15 +1065,18 @@ private BitSet gatherPreAnalysisMetrics(LogicalPlan plan, BitSet b) { private static class ImplicitCasting extends ParameterizedRule { @Override public LogicalPlan apply(LogicalPlan plan, AnalyzerContext context) { - return plan.transformExpressionsUp(ScalarFunction.class, e -> ImplicitCasting.cast(e, context.functionRegistry())); + return plan.transformExpressionsUp( + org.elasticsearch.xpack.esql.core.expression.function.Function.class, + e -> ImplicitCasting.cast(e, context.functionRegistry()) + ); } - private static Expression cast(ScalarFunction f, EsqlFunctionRegistry registry) { + private static Expression cast(org.elasticsearch.xpack.esql.core.expression.function.Function f, EsqlFunctionRegistry registry) { if (f instanceof In in) { return processIn(in); } - if (f instanceof EsqlScalarFunction esf) { - return processScalarFunction(esf, registry); + if (f instanceof EsqlScalarFunction || f instanceof GroupingFunction) { // exclude AggregateFunction until it is needed + return processScalarOrGroupingFunction(f, registry); } if (f instanceof EsqlArithmeticOperation || f instanceof BinaryComparison) { return processBinaryOperator((BinaryOperator) f); @@ -988,7 +1084,10 @@ private static Expression cast(ScalarFunction f, EsqlFunctionRegistry registry) return f; } - private static Expression processScalarFunction(EsqlScalarFunction f, EsqlFunctionRegistry registry) { + private static Expression processScalarOrGroupingFunction( + org.elasticsearch.xpack.esql.core.expression.function.Function f, + EsqlFunctionRegistry registry + ) { List args = f.arguments(); List targetDataTypes = registry.getDataTypeForStringLiteralConversion(f.getClass()); if (targetDataTypes == null || targetDataTypes.isEmpty()) { @@ -1011,9 +1110,11 @@ private static Expression processScalarFunction(EsqlScalarFunction f, EsqlFuncti } if (targetDataType != DataType.NULL && targetDataType != DataType.UNSUPPORTED) { Expression e = castStringLiteral(arg, targetDataType); - childrenChanged = true; - newChildren.add(e); - continue; + if (e != arg) { + childrenChanged = true; + newChildren.add(e); + continue; + } } } } else if (dataType.isNumeric() && canCastMixedNumericTypes(f) && castNumericArgs) { @@ -1095,7 +1196,7 @@ private static Expression processIn(In in) { return childrenChanged ? in.replaceChildren(newChildren) : in; } - private static boolean canCastMixedNumericTypes(EsqlScalarFunction f) { + private static boolean canCastMixedNumericTypes(org.elasticsearch.xpack.esql.core.expression.function.Function f) { return f instanceof Coalesce; } @@ -1142,19 +1243,37 @@ private static boolean supportsStringImplicitCasting(DataType type) { return type == DATETIME || type == IP || type == VERSION || type == BOOLEAN; } - public static Expression castStringLiteral(Expression from, DataType target) { + private static UnresolvedAttribute unresolvedAttribute(Expression value, String type, Exception e) { + String message = format( + "Cannot convert string [{}] to [{}], error [{}]", + value.fold(), + type, + (e instanceof ParsingException pe) ? pe.getErrorMessage() : e.getMessage() + ); + return new UnresolvedAttribute(value.source(), String.valueOf(value.fold()), message); + } + + private static Expression castStringLiteralToTemporalAmount(Expression from) { + try { + TemporalAmount result = maybeParseTemporalAmount(from.fold().toString().strip()); + if (result == null) { + return from; + } + DataType target = result instanceof Duration ? 
TIME_DURATION : DATE_PERIOD; + return new Literal(from.source(), result, target); + } catch (Exception e) { + return unresolvedAttribute(from, DATE_PERIOD + " or " + TIME_DURATION, e); + } + } + + private static Expression castStringLiteral(Expression from, DataType target) { assert from.foldable(); try { - Object to = EsqlDataTypeConverter.convert(from.fold(), target); - return new Literal(from.source(), to, target); + return isTemporalAmount(target) + ? castStringLiteralToTemporalAmount(from) + : new Literal(from.source(), EsqlDataTypeConverter.convert(from.fold(), target), target); } catch (Exception e) { - String message = LoggerMessageFormat.format( - "Cannot convert string [{}] to [{}], error [{}]", - from.fold(), - target, - e.getMessage() - ); - return new UnresolvedAttribute(from.source(), String.valueOf(from.fold()), message); + return unresolvedAttribute(from, target.toString(), e); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/AnalyzerContext.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/AnalyzerContext.java index 6eb572ae96f4d..c6f24d3abf393 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/AnalyzerContext.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/AnalyzerContext.java @@ -15,5 +15,17 @@ public record AnalyzerContext( Configuration configuration, EsqlFunctionRegistry functionRegistry, IndexResolution indexResolution, + IndexResolution lookupResolution, EnrichResolution enrichResolution -) {} +) { + // Currently for tests only, since most do not test lookups + // TODO: make this even simpler, remove the enrichResolution for tests that do not require it (most tests) + public AnalyzerContext( + Configuration configuration, + EsqlFunctionRegistry functionRegistry, + IndexResolution indexResolution, + EnrichResolution enrichResolution + ) { + this(configuration, functionRegistry, indexResolution, IndexResolution.invalid(""), enrichResolution); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/PreAnalyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/PreAnalyzer.java index 9d7c5e141a2b1..460d30618df79 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/PreAnalyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/PreAnalyzer.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.analysis; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.UnresolvedRelation; @@ -22,14 +23,16 @@ public class PreAnalyzer { public static class PreAnalysis { - public static final PreAnalysis EMPTY = new PreAnalysis(emptyList(), emptyList()); + public static final PreAnalysis EMPTY = new PreAnalysis(emptyList(), emptyList(), emptyList()); public final List indices; public final List enriches; + public final List lookupIndices; - public PreAnalysis(List indices, List enriches) { + public PreAnalysis(List indices, List enriches, List lookupIndices) { this.indices = indices; this.enriches = enriches; + this.lookupIndices = lookupIndices; } } @@ -44,13 +47,17 @@ public PreAnalysis preAnalyze(LogicalPlan plan) { protected PreAnalysis doPreAnalyze(LogicalPlan plan) { List indices = new ArrayList<>(); List unresolvedEnriches = new ArrayList<>(); + List lookupIndices = 
new ArrayList<>(); - plan.forEachUp(UnresolvedRelation.class, p -> indices.add(new TableInfo(p.table()))); + plan.forEachUp(UnresolvedRelation.class, p -> { + List list = p.indexMode() == IndexMode.LOOKUP ? lookupIndices : indices; + list.add(new TableInfo(p.table())); + }); plan.forEachUp(Enrich.class, unresolvedEnriches::add); // mark plan as preAnalyzed (if it were marked, there would be no analysis) plan.forEachUp(LogicalPlan::setPreAnalyzed); - return new PreAnalysis(indices, unresolvedEnriches); + return new PreAnalysis(indices, unresolvedEnriches, lookupIndices); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index 632f52d163349..5f8c011cff53a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.esql.analysis; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.xpack.esql.action.EsqlCapabilities; import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.core.capabilities.Unresolvable; @@ -17,6 +19,8 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; +import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; import org.elasticsearch.xpack.esql.core.expression.function.Function; @@ -31,8 +35,10 @@ import org.elasticsearch.xpack.esql.expression.function.aggregate.FilteredExpression; import org.elasticsearch.xpack.esql.expression.function.aggregate.Rate; import org.elasticsearch.xpack.esql.expression.function.fulltext.FullTextFunction; +import org.elasticsearch.xpack.esql.expression.function.fulltext.Kql; import org.elasticsearch.xpack.esql.expression.function.fulltext.Match; import org.elasticsearch.xpack.esql.expression.function.fulltext.QueryString; +import org.elasticsearch.xpack.esql.expression.function.grouping.Categorize; import org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Neg; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; @@ -50,16 +56,19 @@ import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; import org.elasticsearch.xpack.esql.plan.logical.Row; import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; +import org.elasticsearch.xpack.esql.plan.logical.join.LookupJoin; import org.elasticsearch.xpack.esql.stats.FeatureMetric; import org.elasticsearch.xpack.esql.stats.Metrics; import java.util.ArrayList; import java.util.BitSet; import java.util.Collection; +import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.Set; import java.util.function.BiConsumer; import java.util.function.Consumer; @@ -78,9 +87,11 @@ public class Verifier { private final Metrics metrics; + private final XPackLicenseState licenseState; - public Verifier(Metrics 
metrics) { + public Verifier(Metrics metrics, XPackLicenseState licenseState) { this.metrics = metrics; + this.licenseState = licenseState; } /** @@ -164,6 +175,20 @@ else if (p instanceof Lookup lookup) { else { lookup.matchFields().forEach(unresolvedExpressions); } + } else if (p instanceof LookupJoin lj) { + // expect right side to always be a lookup index + lj.right().forEachUp(EsRelation.class, r -> { + if (r.indexMode() != IndexMode.LOOKUP) { + failures.add( + fail( + r, + "LOOKUP JOIN right side [{}] must be a lookup index (index_mode=lookup, not [{}])", + r.index().name(), + r.indexMode().getName() + ) + ); + } + }); } else { @@ -192,10 +217,16 @@ else if (p instanceof Lookup lookup) { checkOperationsOnUnsignedLong(p, failures); checkBinaryComparison(p, failures); checkForSortableDataTypes(p, failures); + checkSort(p, failures); checkFullTextQueryFunctions(p, failures); }); checkRemoteEnrich(plan, failures); + checkMetadataScoreNameReserved(plan, failures); + + if (failures.isEmpty()) { + checkLicense(plan, licenseState, failures); + } // gather metrics if (failures.isEmpty()) { @@ -205,6 +236,25 @@ else if (p instanceof Lookup lookup) { return failures; } + private static void checkMetadataScoreNameReserved(LogicalPlan p, Set failures) { + // _score can only be set as metadata attribute + if (p.inputSet().stream().anyMatch(a -> MetadataAttribute.SCORE.equals(a.name()) && (a instanceof MetadataAttribute) == false)) { + failures.add(fail(p, "`" + MetadataAttribute.SCORE + "` is a reserved METADATA attribute")); + } + } + + private void checkSort(LogicalPlan p, Set failures) { + if (p instanceof OrderBy ob) { + ob.order().forEach(o -> { + o.forEachDown(Function.class, f -> { + if (f instanceof AggregateFunction) { + failures.add(fail(f, "Aggregate functions are not allowed in SORT [{}]", f.functionName())); + } + }); + }); + } + } + private static void checkFilterConditionType(LogicalPlan p, Set localFailures) { if (p instanceof Filter f) { Expression condition = f.condition(); @@ -271,6 +321,7 @@ private static void checkAggregate(LogicalPlan p, Set failures) { r -> failures.add(fail(r, "the rate aggregate[{}] can only be used within the metrics command", r.sourceText())) ); } + checkCategorizeGrouping(agg, failures); } else { p.forEachExpression( GroupingFunction.class, @@ -279,6 +330,74 @@ private static void checkAggregate(LogicalPlan p, Set failures) { } } + /** + * Check CATEGORIZE grouping function usages. + *
<p> + * Some of those checks are temporary, until the required syntax or engine changes are implemented. + * </p>
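+ * Illustrative cases (hypothetical queries): STATS COUNT(*) BY CATEGORIZE(message), level fails (multiple groupings); STATS MAX(cat) BY cat = CATEGORIZE(message) fails (referenced inside an aggregate); a single top-level STATS COUNT(*) BY cat = CATEGORIZE(message) is accepted.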
+ */ + private static void checkCategorizeGrouping(Aggregate agg, Set failures) { + // Forbid CATEGORIZE grouping function with other groupings + if (agg.groupings().size() > 1) { + agg.groupings().forEach(g -> { + g.forEachDown( + Categorize.class, + categorize -> failures.add( + fail(categorize, "cannot use CATEGORIZE grouping function [{}] with multiple groupings", categorize.sourceText()) + ) + ); + }); + } + + // Forbid CATEGORIZE grouping functions not being top level groupings + agg.groupings().forEach(g -> { + // Check all CATEGORIZE but the top level one + Alias.unwrap(g) + .children() + .forEach( + child -> child.forEachDown( + Categorize.class, + c -> failures.add( + fail(c, "CATEGORIZE grouping function [{}] can't be used within other expressions", c.sourceText()) + ) + ) + ); + }); + + // Forbid CATEGORIZE being used in the aggregations + agg.aggregates().forEach(a -> { + a.forEachDown( + Categorize.class, + categorize -> failures.add( + fail(categorize, "cannot use CATEGORIZE grouping function [{}] within the aggregations", categorize.sourceText()) + ) + ); + }); + + // Forbid CATEGORIZE being referenced in the aggregation functions + Map categorizeByAliasId = new HashMap<>(); + agg.groupings().forEach(g -> { + g.forEachDown(Alias.class, alias -> { + if (alias.child() instanceof Categorize categorize) { + categorizeByAliasId.put(alias.id(), categorize); + } + }); + }); + agg.aggregates() + .forEach(a -> a.forEachDown(AggregateFunction.class, aggregate -> aggregate.forEachDown(Attribute.class, attribute -> { + var categorize = categorizeByAliasId.get(attribute.id()); + if (categorize != null) { + failures.add( + fail( + attribute, + "cannot reference CATEGORIZE grouping function [{}] within the aggregations", + attribute.sourceText() + ) + ); + } + }))); + } + private static void checkRateAggregates(Expression expr, int nestedLevel, Set failures) { if (expr instanceof AggregateFunction) { nestedLevel++; @@ -415,7 +534,7 @@ private static void checkRow(LogicalPlan p, Set failures) { if (p instanceof Row row) { row.fields().forEach(a -> { if (DataType.isRepresentable(a.dataType()) == false) { - failures.add(fail(a, "cannot use [{}] directly in a row assignment", a.child().sourceText())); + failures.add(fail(a.child(), "cannot use [{}] directly in a row assignment", a.child().sourceText())); } }); } @@ -473,6 +592,14 @@ private static void checkBinaryComparison(LogicalPlan p, Set failures) }); } + private void checkLicense(LogicalPlan plan, XPackLicenseState licenseState, Set failures) { + plan.forEachExpressionDown(Function.class, p -> { + if (p.checkLicense(licenseState) == false) { + failures.add(new Failure(p, "current license is non-compliant for function [" + p.sourceText() + "]")); + } + }); + } + private void gatherMetrics(LogicalPlan plan, BitSet b) { plan.forEachDown(p -> FeatureMetric.set(p, b)); for (int i = b.nextSetBit(0); i >= 0; i = b.nextSetBit(i + 1)) { @@ -676,19 +803,24 @@ private static void checkNotPresentInDisjunctions( private static void checkFullTextQueryFunctions(LogicalPlan plan, Set failures) { if (plan instanceof Filter f) { Expression condition = f.condition(); - checkCommandsBeforeExpression( - plan, - condition, - QueryString.class, - lp -> (lp instanceof Filter || lp instanceof OrderBy || lp instanceof EsRelation), - qsf -> "[" + qsf.functionName() + "] " + qsf.functionType(), - failures - ); + + List.of(QueryString.class, Kql.class).forEach(functionClass -> { + // Check for limitations of QSTR and KQL function. 
+ checkCommandsBeforeExpression( + plan, + condition, + functionClass, + lp -> (lp instanceof Filter || lp instanceof OrderBy || lp instanceof EsRelation), + fullTextFunction -> "[" + fullTextFunction.functionName() + "] " + fullTextFunction.functionType(), + failures + ); + }); + checkCommandsBeforeExpression( plan, condition, Match.class, - lp -> (lp instanceof Limit == false), + lp -> (lp instanceof Limit == false) && (lp instanceof Aggregate == false), m -> "[" + m.functionName() + "] " + m.functionType(), failures ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java index 286ddbaa29a5b..e52e9ae989a92 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java @@ -41,6 +41,7 @@ import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.OutputOperator; +import org.elasticsearch.compute.operator.Warnings; import org.elasticsearch.compute.operator.lookup.EnrichQuerySourceOperator; import org.elasticsearch.compute.operator.lookup.MergePositionsOperator; import org.elasticsearch.compute.operator.lookup.QueryList; @@ -78,6 +79,7 @@ import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.planner.EsPhysicalOperationProviders; import org.elasticsearch.xpack.esql.planner.PlannerUtils; @@ -166,6 +168,10 @@ abstract class AbstractLookupService list releasables.add(mergePositionsOperator); SearchExecutionContext searchExecutionContext = searchContext.getSearchExecutionContext(); QueryList queryList = queryList(request, searchExecutionContext, inputBlock, request.inputDataType); + var warnings = Warnings.createWarnings( + DriverContext.WarningsMode.COLLECT, + request.source.source().getLineNumber(), + request.source.source().getColumnNumber(), + request.source.text() + ); var queryOperator = new EnrichQuerySourceOperator( driverContext.blockFactory(), EnrichQuerySourceOperator.DEFAULT_MAX_PAGE_SIZE, queryList, - searchExecutionContext.getIndexReader() + searchExecutionContext.getIndexReader(), + warnings ); releasables.add(queryOperator); var extractFieldsOperator = extractFieldsOperator(searchContext, driverContext, request.extractFields); @@ -447,13 +460,22 @@ abstract static class Request { final DataType inputDataType; final Page inputPage; final List extractFields; + final Source source; - Request(String sessionId, String index, DataType inputDataType, Page inputPage, List extractFields) { + Request( + String sessionId, + String index, + DataType inputDataType, + Page inputPage, + List extractFields, + Source source + ) { this.sessionId = sessionId; this.index = index; this.inputDataType = inputDataType; this.inputPage = inputPage; this.extractFields = extractFields; + this.source = source; } } @@ -467,6 +489,7 @@ abstract static class TransportRequest extends org.elasticsearch.transport.Trans final DataType inputDataType; final Page inputPage; final List extractFields; + final Source source; // TODO: Remove this 
workaround once we have Block RefCount final Page toRelease; final RefCounted refs = AbstractRefCounted.of(this::releasePage); @@ -477,7 +500,8 @@ abstract static class TransportRequest extends org.elasticsearch.transport.Trans DataType inputDataType, Page inputPage, Page toRelease, - List extractFields + List extractFields, + Source source ) { this.sessionId = sessionId; this.shardId = shardId; @@ -485,6 +509,7 @@ abstract static class TransportRequest extends org.elasticsearch.transport.Trans this.inputPage = inputPage; this.toRelease = toRelease; this.extractFields = extractFields; + this.source = source; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java index 6e5845fae33b7..df608a04632a2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupOperator.java @@ -16,9 +16,11 @@ import org.elasticsearch.compute.operator.AsyncOperator; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.compute.operator.ResponseHeadersCollector; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import java.io.IOException; @@ -35,6 +37,8 @@ public final class EnrichLookupOperator extends AsyncOperator { private final String matchType; private final String matchField; private final List enrichFields; + private final ResponseHeadersCollector responseHeadersCollector; + private final Source source; private long totalTerms = 0L; public record Factory( @@ -47,7 +51,8 @@ public record Factory( String enrichIndex, String matchType, String matchField, - List enrichFields + List enrichFields, + Source source ) implements OperatorFactory { @Override public String describe() { @@ -75,7 +80,8 @@ public Operator get(DriverContext driverContext) { enrichIndex, matchType, matchField, - enrichFields + enrichFields, + source ); } } @@ -91,7 +97,8 @@ public EnrichLookupOperator( String enrichIndex, String matchType, String matchField, - List enrichFields + List enrichFields, + Source source ) { super(driverContext, maxOutstandingRequests); this.sessionId = sessionId; @@ -103,6 +110,8 @@ public EnrichLookupOperator( this.matchType = matchType; this.matchField = matchField; this.enrichFields = enrichFields; + this.source = source; + this.responseHeadersCollector = new ResponseHeadersCollector(enrichLookupService.getThreadContext()); } @Override @@ -116,9 +125,14 @@ protected void performAsync(Page inputPage, ActionListener listener) { matchType, matchField, new Page(inputBlock), - enrichFields + enrichFields, + source + ); + enrichLookupService.lookupAsync( + request, + parentTask, + ActionListener.runBefore(listener.map(inputPage::appendPage), responseHeadersCollector::collect) ); - enrichLookupService.lookupAsync(request, parentTask, listener.map(inputPage::appendPage)); } @Override @@ -140,6 +154,7 @@ public String toString() { protected void doClose() { // TODO: Maybe create a sub-task as the parent task of all the lookup tasks // then cancel it when this operator terminates early (e.g., have enough result). 
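+ // finish() restores the response headers gathered by collect() after each lookup response (see performAsync above) into the main thread context, so warnings from lookups still reach the final response.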
+ responseHeadersCollector.finish(); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index 2d85b46e33a8c..50a1ffce4841f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -18,6 +18,8 @@ import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.lookup.QueryList; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.RangeFieldMapper; +import org.elasticsearch.index.mapper.RangeType; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchService; @@ -27,6 +29,7 @@ import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.action.EsqlQueryAction; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; @@ -71,13 +74,15 @@ protected TransportRequest transportRequest(EnrichLookupService.Request request, request.matchField, request.inputPage, null, - request.extractFields + request.extractFields, + request.source ); } @Override protected QueryList queryList(TransportRequest request, SearchExecutionContext context, Block inputBlock, DataType inputDataType) { MappedFieldType fieldType = context.getFieldType(request.matchField); + validateTypes(inputDataType, fieldType); return switch (request.matchType) { case "match", "range" -> termQueryList(fieldType, context, inputBlock, inputDataType); case "geo_match" -> QueryList.geoShapeQueryList(fieldType, context, inputBlock); @@ -85,6 +90,33 @@ protected QueryList queryList(TransportRequest request, SearchExecutionContext c }; } + private static void validateTypes(DataType inputDataType, MappedFieldType fieldType) { + if (fieldType instanceof RangeFieldMapper.RangeFieldType rangeType) { + // For range policy types, the ENRICH index field type will be one of a list of supported range types, + // which need to match the input data type (eg. ip-range -> ip, date-range -> date, etc.) + if (rangeTypesCompatible(rangeType.rangeType(), inputDataType) == false) { + throw new EsqlIllegalArgumentException( + "ENRICH range and input types are incompatible: range[" + rangeType.rangeType() + "], input[" + inputDataType + "]" + ); + } + } + // For match policies, the ENRICH index field will always be KEYWORD, and input type will be converted to KEYWORD. + // For geo_match, type validation is done earlier, in the Analyzer. 
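+ // Illustrative pairings per rangeTypesCompatible below: integer_range and long_range accept whole-number inputs, ip_range accepts IP, date_range accepts dates; KEYWORD input is always accepted and parsed at runtime, where failures appear to surface as warnings (see ENRICH_STRICT_RANGE_TYPES).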
+ } + + private static boolean rangeTypesCompatible(RangeType rangeType, DataType inputDataType) { + if (inputDataType.noText() == DataType.KEYWORD) { + // We allow runtime parsing of string types to numeric types + return true; + } + return switch (rangeType) { + case INTEGER, LONG -> inputDataType.isWholeNumber(); + case IP -> inputDataType == DataType.IP; + case DATE -> inputDataType.isDate(); + default -> rangeType.isNumeric() == inputDataType.isNumeric(); + }; + } + public static class Request extends AbstractLookupService.Request { private final String matchType; private final String matchField; @@ -96,9 +128,10 @@ public static class Request extends AbstractLookupService.Request { String matchType, String matchField, Page inputPage, - List extractFields + List extractFields, + Source source ) { - super(sessionId, index, inputDataType, inputPage, extractFields); + super(sessionId, index, inputDataType, inputPage, extractFields, source); this.matchType = matchType; this.matchField = matchField; } @@ -116,9 +149,10 @@ protected static class TransportRequest extends AbstractLookupService.TransportR String matchField, Page inputPage, Page toRelease, - List extractFields + List extractFields, + Source source ) { - super(sessionId, shardId, inputDataType, inputPage, toRelease, extractFields); + super(sessionId, shardId, inputDataType, inputPage, toRelease, extractFields, source); this.matchType = matchType; this.matchField = matchField; } @@ -138,6 +172,10 @@ static TransportRequest readFrom(StreamInput in, BlockFactory blockFactory) thro } PlanStreamInput planIn = new PlanStreamInput(in, in.namedWriteableRegistry(), null); List extractFields = planIn.readNamedWriteableCollectionAsList(NamedExpression.class); + var source = Source.EMPTY; + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_ENRICH_RUNTIME_WARNINGS)) { + source = Source.readFrom(planIn); + } TransportRequest result = new TransportRequest( sessionId, shardId, @@ -146,7 +184,8 @@ static TransportRequest readFrom(StreamInput in, BlockFactory blockFactory) thro matchField, inputPage, inputPage, - extractFields + extractFields, + source ); result.setParentTask(parentTaskId); return result; @@ -165,6 +204,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeWriteable(inputPage); PlanStreamOutput planOut = new PlanStreamOutput(out, null); planOut.writeNamedWriteableCollection(extractFields); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_ENRICH_RUNTIME_WARNINGS)) { + source.writeTo(planOut); + } } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexOperator.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexOperator.java index 836b400c54f8c..f09f7d0e23e7b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexOperator.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexOperator.java @@ -19,6 +19,7 @@ import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import java.io.IOException; @@ -36,7 +37,8 @@ public record Factory( DataType inputDataType, String lookupIndex, String matchField, - List loadFields + List loadFields, + Source source ) implements OperatorFactory { @Override public 
String describe() { @@ -63,7 +65,8 @@ public Operator get(DriverContext driverContext) { inputDataType, lookupIndex, matchField, - loadFields + loadFields, + source ); } } @@ -76,6 +79,7 @@ public Operator get(DriverContext driverContext) { private final String lookupIndex; private final String matchField; private final List loadFields; + private final Source source; private long totalTerms = 0L; public LookupFromIndexOperator( @@ -88,7 +92,8 @@ public LookupFromIndexOperator( DataType inputDataType, String lookupIndex, String matchField, - List loadFields + List loadFields, + Source source ) { super(driverContext, maxOutstandingRequests); this.sessionId = sessionId; @@ -99,6 +104,7 @@ public LookupFromIndexOperator( this.lookupIndex = lookupIndex; this.matchField = matchField; this.loadFields = loadFields; + this.source = source; } @Override @@ -111,7 +117,8 @@ protected void performAsync(Page inputPage, ActionListener listener) { inputDataType, matchField, new Page(inputBlock), - loadFields + loadFields, + source ); lookupService.lookupAsync(request, parentTask, listener.map(inputPage::appendPage)); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexService.java index ef204e88c234f..849e8e890e248 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexService.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.enrich; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -25,6 +26,7 @@ import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; import org.elasticsearch.xpack.esql.action.EsqlQueryAction; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; @@ -68,7 +70,8 @@ protected TransportRequest transportRequest(LookupFromIndexService.Request reque request.inputPage, null, request.extractFields, - request.matchField + request.matchField, + request.source ); } @@ -87,9 +90,10 @@ public static class Request extends AbstractLookupService.Request { DataType inputDataType, String matchField, Page inputPage, - List extractFields + List extractFields, + Source source ) { - super(sessionId, index, inputDataType, inputPage, extractFields); + super(sessionId, index, inputDataType, inputPage, extractFields, source); this.matchField = matchField; } } @@ -104,9 +108,10 @@ protected static class TransportRequest extends AbstractLookupService.TransportR Page inputPage, Page toRelease, List extractFields, - String matchField + String matchField, + Source source ) { - super(sessionId, shardId, inputDataType, inputPage, toRelease, extractFields); + super(sessionId, shardId, inputDataType, inputPage, toRelease, extractFields, source); this.matchField = matchField; } @@ -122,6 +127,10 @@ static TransportRequest readFrom(StreamInput in, BlockFactory blockFactory) thro PlanStreamInput planIn = new PlanStreamInput(in, in.namedWriteableRegistry(), null); List extractFields = 
planIn.readNamedWriteableCollectionAsList(NamedExpression.class); String matchField = in.readString(); + var source = Source.EMPTY; + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_ENRICH_RUNTIME_WARNINGS)) { + source = Source.readFrom(planIn); + } TransportRequest result = new TransportRequest( sessionId, shardId, @@ -129,7 +138,8 @@ static TransportRequest readFrom(StreamInput in, BlockFactory blockFactory) thro inputPage, inputPage, extractFields, - matchField + matchField, + source ); result.setParentTask(parentTaskId); return result; @@ -145,6 +155,9 @@ public void writeTo(StreamOutput out) throws IOException { PlanStreamOutput planOut = new PlanStreamOutput(out, null); planOut.writeNamedWriteableCollection(extractFields); out.writeString(matchField); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_ENRICH_RUNTIME_WARNINGS)) { + source.writeTo(planOut); + } } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java index 816388193c5f6..c1269009c6a41 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.indices.IndicesExpressionGrouper; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; @@ -40,13 +41,13 @@ public class PlanExecutor { private final Verifier verifier; private final PlanningMetricsManager planningMetricsManager; - public PlanExecutor(IndexResolver indexResolver, MeterRegistry meterRegistry) { + public PlanExecutor(IndexResolver indexResolver, MeterRegistry meterRegistry, XPackLicenseState licenseState) { this.indexResolver = indexResolver; this.preAnalyzer = new PreAnalyzer(); this.functionRegistry = new EsqlFunctionRegistry(); this.mapper = new Mapper(); this.metrics = new Metrics(functionRegistry); - this.verifier = new Verifier(metrics); + this.verifier = new Verifier(metrics, licenseState); this.planningMetricsManager = new PlanningMetricsManager(meterRegistry); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/ExpressionWritables.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/ExpressionWritables.java new file mode 100644 index 0000000000000..7e2de0094c2ab --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/ExpressionWritables.java @@ -0,0 +1,213 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.xpack.esql.core.expression.ExpressionCoreWritables; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; +import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; +import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNull; +import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; +import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateWritables; +import org.elasticsearch.xpack.esql.expression.function.fulltext.FullTextWritables; +import org.elasticsearch.xpack.esql.expression.function.scalar.ScalarFunctionWritables; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.FromBase64; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToBase64; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToBoolean; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianPoint; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianShape; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDateNanos; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDatetime; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDegrees; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToGeoPoint; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToGeoShape; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToIP; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToInteger; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToLong; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToRadians; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToString; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToUnsignedLong; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToVersion; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Abs; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Acos; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Asin; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Atan; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cbrt; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Ceil; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cos; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cosh; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Exp; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Floor; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Log10; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Signum; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Sin; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Sinh; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Sqrt; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Tan; +import 
org.elasticsearch.xpack.esql.expression.function.scalar.math.Tanh; +import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvFunctionWritables; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialContains; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialDisjoint; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialIntersects; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialWithin; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StDistance; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StX; +import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StY; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.ByteLength; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Length; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.RLike; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.RTrim; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Space; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Trim; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.WildcardLike; +import org.elasticsearch.xpack.esql.expression.function.scalar.util.Delay; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mod; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Neg; +import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Sub; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; + +import java.util.ArrayList; +import java.util.List; + +public class ExpressionWritables { + + public static List getNamedWriteables() { + List entries = new ArrayList<>(); + + entries.addAll(allExpressions()); + entries.addAll(aggregates()); + entries.addAll(scalars()); + entries.addAll(spatials()); + entries.addAll(arithmetics()); + entries.addAll(binaryComparisons()); + entries.addAll(fullText()); + entries.addAll(unaryScalars()); + return entries; + } + + public static List attributes() { + List entries = new ArrayList<>(); + entries.addAll(ExpressionCoreWritables.attributes()); + entries.add(UnsupportedAttribute.ENTRY); + return entries; + } + + public static List namedExpressions() { + List entries = new ArrayList<>(); + entries.addAll(ExpressionCoreWritables.namedExpressions()); + entries.add(UnsupportedAttribute.NAMED_EXPRESSION_ENTRY); + return entries; + } + + public static List expressions() { + List entries = new ArrayList<>(); + entries.addAll(ExpressionCoreWritables.expressions()); + 
entries.add(UnsupportedAttribute.EXPRESSION_ENTRY); + entries.add(Order.ENTRY); + return entries; + } + + public static List allExpressions() { + List entries = new ArrayList<>(); + entries.addAll(expressions()); + entries.addAll(namedExpressions()); + entries.addAll(attributes()); + return entries; + } + + public static List aggregates() { + return AggregateWritables.getNamedWriteables(); + } + + public static List scalars() { + return ScalarFunctionWritables.getNamedWriteables(); + } + + public static List unaryScalars() { + List entries = new ArrayList<>(); + entries.add(Abs.ENTRY); + entries.add(Acos.ENTRY); + entries.add(Asin.ENTRY); + entries.add(Atan.ENTRY); + entries.add(ByteLength.ENTRY); + entries.add(Cbrt.ENTRY); + entries.add(Ceil.ENTRY); + entries.add(Cos.ENTRY); + entries.add(Cosh.ENTRY); + entries.add(Exp.ENTRY); + entries.add(Floor.ENTRY); + entries.add(FromBase64.ENTRY); + entries.add(IsNotNull.ENTRY); + entries.add(IsNull.ENTRY); + entries.add(Length.ENTRY); + entries.add(Log10.ENTRY); + entries.add(LTrim.ENTRY); + entries.add(Neg.ENTRY); + entries.add(Not.ENTRY); + entries.add(RLike.ENTRY); + entries.add(RTrim.ENTRY); + entries.add(Signum.ENTRY); + entries.add(Sin.ENTRY); + entries.add(Sinh.ENTRY); + entries.add(Space.ENTRY); + entries.add(Sqrt.ENTRY); + entries.add(StX.ENTRY); + entries.add(StY.ENTRY); + entries.add(Tan.ENTRY); + entries.add(Tanh.ENTRY); + entries.add(ToBase64.ENTRY); + entries.add(ToBoolean.ENTRY); + entries.add(ToCartesianPoint.ENTRY); + entries.add(ToDatetime.ENTRY); + entries.add(ToDateNanos.ENTRY); + entries.add(ToDegrees.ENTRY); + entries.add(ToDouble.ENTRY); + entries.add(ToGeoShape.ENTRY); + entries.add(ToCartesianShape.ENTRY); + entries.add(ToGeoPoint.ENTRY); + entries.add(ToIP.ENTRY); + entries.add(ToInteger.ENTRY); + entries.add(ToLong.ENTRY); + entries.add(ToRadians.ENTRY); + entries.add(ToString.ENTRY); + entries.add(ToUnsignedLong.ENTRY); + entries.add(ToVersion.ENTRY); + entries.add(Trim.ENTRY); + entries.add(WildcardLike.ENTRY); + entries.add(Delay.ENTRY); + // mv functions + entries.addAll(MvFunctionWritables.getNamedWriteables()); + return entries; + } + + private static List spatials() { + return List.of(SpatialContains.ENTRY, SpatialDisjoint.ENTRY, SpatialIntersects.ENTRY, SpatialWithin.ENTRY, StDistance.ENTRY); + } + + private static List arithmetics() { + return List.of(Add.ENTRY, Div.ENTRY, Mod.ENTRY, Mul.ENTRY, Sub.ENTRY); + } + + private static List binaryComparisons() { + return List.of(Equals.ENTRY, GreaterThan.ENTRY, GreaterThanOrEqual.ENTRY, LessThan.ENTRY, LessThanOrEqual.ENTRY, NotEquals.ENTRY); + } + + private static List fullText() { + return FullTextWritables.getNamedWriteables(); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index d1aef0e46caca..3d26bc170b723 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -28,10 +28,12 @@ import org.elasticsearch.xpack.esql.expression.function.aggregate.Percentile; import org.elasticsearch.xpack.esql.expression.function.aggregate.Rate; import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroid; +import org.elasticsearch.xpack.esql.expression.function.aggregate.StdDev; import 
org.elasticsearch.xpack.esql.expression.function.aggregate.Sum; import org.elasticsearch.xpack.esql.expression.function.aggregate.Top; import org.elasticsearch.xpack.esql.expression.function.aggregate.Values; import org.elasticsearch.xpack.esql.expression.function.aggregate.WeightedAvg; +import org.elasticsearch.xpack.esql.expression.function.fulltext.Kql; import org.elasticsearch.xpack.esql.expression.function.fulltext.Match; import org.elasticsearch.xpack.esql.expression.function.fulltext.QueryString; import org.elasticsearch.xpack.esql.expression.function.grouping.Bucket; @@ -160,27 +162,30 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.CARTESIAN_POINT; import static org.elasticsearch.xpack.esql.core.type.DataType.CARTESIAN_SHAPE; import static org.elasticsearch.xpack.esql.core.type.DataType.DATETIME; +import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_PERIOD; import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_SHAPE; import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; import static org.elasticsearch.xpack.esql.core.type.DataType.IP; -import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; -import static org.elasticsearch.xpack.esql.core.type.DataType.TEXT; +import static org.elasticsearch.xpack.esql.core.type.DataType.TIME_DURATION; import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; import static org.elasticsearch.xpack.esql.core.type.DataType.UNSUPPORTED; import static org.elasticsearch.xpack.esql.core.type.DataType.VERSION; +import static org.elasticsearch.xpack.esql.core.type.DataType.isString; public class EsqlFunctionRegistry { - private static final Map, List> dataTypesForStringLiteralConversion = new LinkedHashMap<>(); + private static final Map, List> DATA_TYPES_FOR_STRING_LITERAL_CONVERSIONS = new LinkedHashMap<>(); - private static final Map dataTypeCastingPriority; + private static final Map DATA_TYPE_CASTING_PRIORITY; static { List typePriorityList = Arrays.asList( DATETIME, + DATE_PERIOD, + TIME_DURATION, DOUBLE, LONG, INTEGER, @@ -194,9 +199,9 @@ public class EsqlFunctionRegistry { UNSIGNED_LONG, UNSUPPORTED ); - dataTypeCastingPriority = new HashMap<>(); + DATA_TYPE_CASTING_PRIORITY = new HashMap<>(); for (int i = 0; i < typePriorityList.size(); i++) { - dataTypeCastingPriority.put(typePriorityList.get(i), i); + DATA_TYPE_CASTING_PRIORITY.put(typePriorityList.get(i), i); } } @@ -257,7 +262,7 @@ public Collection listFunctions(String pattern) { .collect(toList()); } - private FunctionDefinition[][] functions() { + private static FunctionDefinition[][] functions() { return new FunctionDefinition[][] { // grouping functions new FunctionDefinition[] { def(Bucket.class, Bucket::new, "bucket", "bin"), }, @@ -273,6 +278,7 @@ private FunctionDefinition[][] functions() { def(MedianAbsoluteDeviation.class, uni(MedianAbsoluteDeviation::new), "median_absolute_deviation"), def(Min.class, uni(Min::new), "min"), def(Percentile.class, bi(Percentile::new), "percentile"), + def(StdDev.class, uni(StdDev::new), "std_dev"), def(Sum.class, uni(Sum::new), "sum"), def(Top.class, tri(Top::new), "top"), def(Values.class, uni(Values::new), "values"), @@ -406,6 +412,7 @@ private static FunctionDefinition[][] snapshotFunctions() { // This is an experimental function and can be removed 
without notice. def(Delay.class, Delay::new, "delay"), def(Categorize.class, Categorize::new, "categorize"), + def(Kql.class, Kql::new, "kql"), def(Rate.class, Rate::withUnresolvedTimestamp, "rate") } }; } @@ -437,6 +444,11 @@ public static String normalizeName(String name) { } public record ArgSignature(String name, String[] type, String description, boolean optional, DataType targetDataType) { + + public ArgSignature(String name, String[] type, String description, boolean optional) { + this(name, type, description, optional, UNSUPPORTED); + } + @Override public String toString() { return "ArgSignature{" @@ -477,17 +489,26 @@ public List<String> argDescriptions() { } } - public static DataType getTargetType(String[] names) { + /** + * Build a list of target data types, which is used by ImplicitCasting to convert string literals to a target data type. + */ + private static DataType getTargetType(String[] names) { List<DataType> types = new ArrayList<>(); for (String name : names) { - types.add(DataType.fromEs(name)); - } - if (types.contains(KEYWORD) || types.contains(TEXT)) { - return UNSUPPORTED; + DataType type = DataType.fromTypeName(name); + if (type != null && type != UNSUPPORTED) { // A type should not be null or UNSUPPORTED; this is just a sanity check + // If the function takes strings as input, there is no need to cast a string literal to it. + // Returning UNSUPPORTED means that ImplicitCasting doesn't support this argument, and it will be skipped by ImplicitCasting. + if (isString(type)) { + return UNSUPPORTED; + } + types.add(type); + } } return types.stream() - .min((dt1, dt2) -> dataTypeCastingPriority.get(dt1).compareTo(dataTypeCastingPriority.get(dt2))) + .filter(DATA_TYPE_CASTING_PRIORITY::containsKey) + .min((dt1, dt2) -> DATA_TYPE_CASTING_PRIORITY.get(dt1).compareTo(DATA_TYPE_CASTING_PRIORITY.get(dt2))) + .orElse(UNSUPPORTED); } @@ -559,7 +580,7 @@ private void buildDataTypesForStringLiteralConversion(FunctionDefinition[]... gr for (FunctionDefinition[] group : groupFunctions) { for (FunctionDefinition def : group) { FunctionDescription signature = description(def); - dataTypesForStringLiteralConversion.put( + DATA_TYPES_FOR_STRING_LITERAL_CONVERSIONS.put( def.clazz(), signature.args().stream().map(EsqlFunctionRegistry.ArgSignature::targetDataType).collect(Collectors.toList()) ); @@ -568,7 +589,7 @@ private void buildDataTypesForStringLiteralConversion(FunctionDefinition[]...
gr } public List getDataTypeForStringLiteralConversion(Class clazz) { - return dataTypesForStringLiteralConversion.get(clazz); + return DATA_TYPES_FOR_STRING_LITERAL_CONVERSIONS.get(clazz); } private static class SnapshotFunctionRegistry extends EsqlFunctionRegistry { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateFunction.java index f7a74cc2ae93f..87efccfc90ab3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateFunction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.expression.function.aggregate; import org.elasticsearch.TransportVersions; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; @@ -30,27 +29,6 @@ * A type of {@code Function} that takes multiple values and extracts a single value out of them. For example, {@code AVG()}. */ public abstract class AggregateFunction extends Function { - public static List getNamedWriteables() { - return List.of( - Avg.ENTRY, - Count.ENTRY, - CountDistinct.ENTRY, - Max.ENTRY, - Median.ENTRY, - MedianAbsoluteDeviation.ENTRY, - Min.ENTRY, - Percentile.ENTRY, - Rate.ENTRY, - SpatialCentroid.ENTRY, - Sum.ENTRY, - Top.ENTRY, - Values.ENTRY, - // internal functions - ToPartial.ENTRY, - FromPartial.ENTRY, - WeightedAvg.ENTRY - ); - } private final Expression field; private final List parameters; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateWritables.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateWritables.java new file mode 100644 index 0000000000000..d74b5c8b386b8 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateWritables.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; + +import java.util.List; + +public class AggregateWritables { + + public static List<NamedWriteableRegistry.Entry> getNamedWriteables() { + return List.of( + Avg.ENTRY, + Count.ENTRY, + CountDistinct.ENTRY, + Max.ENTRY, + Median.ENTRY, + MedianAbsoluteDeviation.ENTRY, + Min.ENTRY, + Percentile.ENTRY, + Rate.ENTRY, + SpatialCentroid.ENTRY, + StdDev.ENTRY, + Sum.ENTRY, + Top.ENTRY, + Values.ENTRY, + // internal functions + ToPartial.ENTRY, + FromPartial.ENTRY, + WeightedAvg.ENTRY + ); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java index fa8a9e7d8c837..3a0d616d407a3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java @@ -58,7 +58,9 @@ public class Count extends AggregateFunction implements ToAggregator, SurrogateE ), @Example( description = "To count the same stream of data based on two different expressions " - + "use the pattern `COUNT(<expression> OR NULL)`", + + "use the pattern `COUNT(<expression> OR NULL)`. This builds on the three-valued logic " + + "({wikipedia}/Three-valued_logic[3VL]) of the language: `TRUE OR NULL` is `TRUE`, but `FALSE OR NULL` is `NULL`, " + + "plus the way COUNT handles `NULL`s: `COUNT(TRUE)` and `COUNT(FALSE)` are both 1, but `COUNT(NULL)` is 0.", file = "stats", tag = "count-or-null" ) } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java index 5ae162f1fbb12..2e45b1c1fe082 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java @@ -66,7 +66,8 @@ public class CountDistinct extends AggregateFunction implements OptionalArgument Map.entry(DataType.KEYWORD, CountDistinctBytesRefAggregatorFunctionSupplier::new), Map.entry(DataType.IP, CountDistinctBytesRefAggregatorFunctionSupplier::new), Map.entry(DataType.VERSION, CountDistinctBytesRefAggregatorFunctionSupplier::new), - Map.entry(DataType.TEXT, CountDistinctBytesRefAggregatorFunctionSupplier::new) + Map.entry(DataType.TEXT, CountDistinctBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.SEMANTIC_TEXT, CountDistinctBytesRefAggregatorFunctionSupplier::new) ); private static final int DEFAULT_PRECISION = 3000; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java index ac2d4ff3cbc43..eb0c8abd1080b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java @@ -51,11 +51,12 @@ public class Max extends AggregateFunction implements ToAggregator, SurrogateExp Map.entry(DataType.IP, MaxIpAggregatorFunctionSupplier::new), Map.entry(DataType.KEYWORD, MaxBytesRefAggregatorFunctionSupplier::new),
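The `COUNT(<expression> OR NULL)` idiom in the Count example description combines two rules: three-valued `OR`, and `COUNT` ignoring `NULL`. A self-contained sketch of those two rules, modeling ES|QL's `NULL` with Java's boxed `Boolean` (illustrative only, not the engine's implementation):

class ThreeValuedLogicSketch {
    // TRUE OR NULL == TRUE, FALSE OR NULL == NULL, NULL OR NULL == NULL.
    static Boolean or3vl(Boolean a, Boolean b) {
        if (Boolean.TRUE.equals(a) || Boolean.TRUE.equals(b)) {
            return Boolean.TRUE;
        }
        if (a == null || b == null) {
            return null;
        }
        return Boolean.FALSE;
    }

    // COUNT counts every non-null value: TRUE and FALSE each add 1, NULL adds 0.
    static long count(Iterable<Boolean> values) {
        long n = 0;
        for (Boolean v : values) {
            if (v != null) {
                n++;
            }
        }
        return n;
    }
}

Chaining the two, count over or3vl(condition, null) increments only when condition is TRUE, which is exactly why the pattern counts one stream of data under two different predicates.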
Map.entry(DataType.TEXT, MaxBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.SEMANTIC_TEXT, MaxBytesRefAggregatorFunctionSupplier::new), Map.entry(DataType.VERSION, MaxBytesRefAggregatorFunctionSupplier::new) ); @FunctionInfo( - returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "long", "version" }, + returnType = { "boolean", "double", "integer", "long", "date", "date_nanos", "ip", "keyword", "long", "version" }, description = "The maximum value of a field.", isAggregation = true, examples = { @@ -72,7 +73,7 @@ public Max( Source source, @Param( name = "field", - type = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" } + type = { "boolean", "double", "integer", "long", "date", "date_nanos", "ip", "keyword", "text", "long", "version" } ) Expression field ) { this(source, field, Literal.TRUE); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java index a5fc8196847b7..472f0b1ff5cd1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java @@ -51,11 +51,12 @@ public class Min extends AggregateFunction implements ToAggregator, SurrogateExp Map.entry(DataType.IP, MinIpAggregatorFunctionSupplier::new), Map.entry(DataType.VERSION, MinBytesRefAggregatorFunctionSupplier::new), Map.entry(DataType.KEYWORD, MinBytesRefAggregatorFunctionSupplier::new), - Map.entry(DataType.TEXT, MinBytesRefAggregatorFunctionSupplier::new) + Map.entry(DataType.TEXT, MinBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.SEMANTIC_TEXT, MinBytesRefAggregatorFunctionSupplier::new) ); @FunctionInfo( - returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "long", "version" }, + returnType = { "boolean", "double", "integer", "long", "date", "date_nanos", "ip", "keyword", "long", "version" }, description = "The minimum value of a field.", isAggregation = true, examples = { @@ -72,7 +73,7 @@ public Min( Source source, @Param( name = "field", - type = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" } + type = { "boolean", "double", "integer", "long", "date", "date_nanos", "ip", "keyword", "text", "long", "version" } ) Expression field ) { this(source, field, Literal.TRUE); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/StdDev.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/StdDev.java new file mode 100644 index 0000000000000..189b6a81912cb --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/StdDev.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.StdDevDoubleAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.StdDevIntAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.StdDevLongAggregatorFunctionSupplier; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.Example; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.planner.ToAggregator; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; + +public class StdDev extends AggregateFunction implements ToAggregator { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "StdDev", StdDev::new); + + @FunctionInfo( + returnType = "double", + description = "The standard deviation of a numeric field.", + isAggregation = true, + examples = { + @Example(file = "stats", tag = "stdev"), + @Example( + description = "The expression can use inline functions. 
For example, to calculate the standard " + + "deviation of each employee's maximum salary changes, first use `MV_MAX` on each row, " + + "and then use `STD_DEV` on the result", + file = "stats", + tag = "docsStatsStdDevNestedExpression" + ) } + ) + public StdDev(Source source, @Param(name = "number", type = { "double", "integer", "long" }) Expression field) { + this(source, field, Literal.TRUE); + } + + public StdDev(Source source, Expression field, Expression filter) { + super(source, field, filter, emptyList()); + } + + private StdDev(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + + @Override + protected Expression.TypeResolution resolveType() { + return isType( + field(), + dt -> dt.isNumeric() && dt != DataType.UNSIGNED_LONG, + sourceText(), + DEFAULT, + "numeric except unsigned_long or counter types" + ); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StdDev::new, field(), filter()); + } + + @Override + public StdDev replaceChildren(List newChildren) { + return new StdDev(source(), newChildren.get(0), newChildren.get(1)); + } + + public StdDev withFilter(Expression filter) { + return new StdDev(source(), field(), filter); + } + + @Override + public final AggregatorFunctionSupplier supplier(List inputChannels) { + DataType type = field().dataType(); + if (type == DataType.LONG) { + return new StdDevLongAggregatorFunctionSupplier(inputChannels); + } + if (type == DataType.INTEGER) { + return new StdDevIntAggregatorFunctionSupplier(inputChannels); + } + if (type == DataType.DOUBLE) { + return new StdDevDoubleAggregatorFunctionSupplier(inputChannels); + } + throw EsqlIllegalArgumentException.illegalDataType(type); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java index 111eab051719b..5260b3e8fa279 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java @@ -46,13 +46,14 @@ public class Values extends AggregateFunction implements ToAggregator { Map.entry(DataType.DOUBLE, ValuesDoubleAggregatorFunctionSupplier::new), Map.entry(DataType.KEYWORD, ValuesBytesRefAggregatorFunctionSupplier::new), Map.entry(DataType.TEXT, ValuesBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.SEMANTIC_TEXT, ValuesBytesRefAggregatorFunctionSupplier::new), Map.entry(DataType.IP, ValuesBytesRefAggregatorFunctionSupplier::new), Map.entry(DataType.VERSION, ValuesBytesRefAggregatorFunctionSupplier::new), Map.entry(DataType.BOOLEAN, ValuesBooleanAggregatorFunctionSupplier::new) ); @FunctionInfo( - returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "version" }, preview = true, description = "Returns all values in a group as a multivalued field. The order of the returned values isn't guaranteed. 
" + "If you need the values returned in order use <>.", @@ -70,7 +71,10 @@ public class Values extends AggregateFunction implements ToAggregator { ) public Values( Source source, - @Param(name = "field", type = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }) Expression v + @Param( + name = "field", + type = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" } + ) Expression v ) { this(source, v, Literal.TRUE); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java index 4106df331d101..9f08401a42dd1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/package-info.java @@ -94,7 +94,7 @@ * {@link org.elasticsearch.common.io.stream.NamedWriteable#writeTo}, * and a deserializing constructor. Then add an {@link org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry} * constant and add that constant to the list in - * {@link org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction#getNamedWriteables}. + * {@link org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateWritables#getNamedWriteables}. * *
  • * Do the same with {@link org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry}. diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/FullTextFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/FullTextFunction.java index 1a3667de992cd..9addf08e1b5f9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/FullTextFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/FullTextFunction.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.esql.expression.function.fulltext; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Nullability; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; @@ -29,9 +28,6 @@ * {@link org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizer} to rewrite them into Lucene queries. */ public abstract class FullTextFunction extends Function { - public static List getNamedWriteables() { - return List.of(QueryString.ENTRY, Match.ENTRY); - } private final Expression query; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/FullTextWritables.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/FullTextWritables.java new file mode 100644 index 0000000000000..8804a031de78c --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/FullTextWritables.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.fulltext; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.xpack.esql.action.EsqlCapabilities; +import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.MatchQueryPredicate; +import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.MultiMatchQueryPredicate; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class FullTextWritables { + + public static List getNamedWriteables() { + List entries = new ArrayList<>(); + + entries.add(MatchQueryPredicate.ENTRY); + entries.add(MultiMatchQueryPredicate.ENTRY); + entries.add(QueryString.ENTRY); + entries.add(Match.ENTRY); + + if (EsqlCapabilities.Cap.KQL_FUNCTION.isEnabled()) { + entries.add(Kql.ENTRY); + } + + return Collections.unmodifiableList(entries); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Kql.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Kql.java new file mode 100644 index 0000000000000..c03902373c02e --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Kql.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.fulltext; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.Example; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.querydsl.query.KqlQuery; + +import java.io.IOException; +import java.util.List; + +/** + * Full text function that performs a {@link KqlQuery} . + */ +public class Kql extends FullTextFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Kql", Kql::new); + + @FunctionInfo( + returnType = "boolean", + preview = true, + description = "Performs a KQL query. Returns true if the provided KQL query string matches the row.", + examples = { @Example(file = "kql-function", tag = "kql-with-field") } + ) + public Kql( + Source source, + @Param( + name = "query", + type = { "keyword", "text" }, + description = "Query string in KQL query string format." + ) Expression queryString + ) { + super(source, queryString, List.of(queryString)); + } + + private Kql(StreamInput in) throws IOException { + this(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source().writeTo(out); + out.writeNamedWriteable(query()); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new Kql(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Kql::new, query()); + } + +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Categorize.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Categorize.java index 75a9883a77102..63b5073c2217a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Categorize.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Categorize.java @@ -7,20 +7,10 @@ package org.elasticsearch.xpack.esql.expression.function.grouping; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.core.WhitespaceTokenizer; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.util.BytesRefHash; -import org.elasticsearch.compute.ann.Evaluator; -import org.elasticsearch.compute.ann.Fixed; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; -import org.elasticsearch.index.analysis.CharFilterFactory; -import org.elasticsearch.index.analysis.CustomAnalyzer; -import 
org.elasticsearch.index.analysis.TokenFilterFactory; -import org.elasticsearch.index.analysis.TokenizerFactory; import org.elasticsearch.xpack.esql.capabilities.Validatable; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; @@ -29,10 +19,6 @@ import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; -import org.elasticsearch.xpack.ml.aggs.categorization.CategorizationBytesRefHash; -import org.elasticsearch.xpack.ml.aggs.categorization.CategorizationPartOfSpeechDictionary; -import org.elasticsearch.xpack.ml.aggs.categorization.TokenListCategorizer; -import org.elasticsearch.xpack.ml.job.categorization.CategorizationAnalyzer; import java.io.IOException; import java.util.List; @@ -42,16 +28,12 @@ /** * Categorizes text messages. - * - * This implementation is incomplete and comes with the following caveats: - * - it only works correctly on a single node. - * - when running on multiple nodes, category IDs of the different nodes are - * aggregated, even though the same ID can correspond to a totally different - * category - * - the output consists of category IDs, which should be replaced by category - * regexes or keys - * - * TODO(jan, nik): fix this + *

<p>
+ *     This function has no evaluators, as it works like an aggregation (accumulates values, stores intermediate states, etc.).
+ * </p>
+ * <p>
+ *     For the implementation, see {@link org.elasticsearch.compute.aggregation.blockhash.CategorizeBlockHash}.
+ * </p>
    */ public class Categorize extends GroupingFunction implements Validatable { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( @@ -62,7 +44,7 @@ public class Categorize extends GroupingFunction implements Validatable { private final Expression field; - @FunctionInfo(returnType = { "integer" }, description = "Categorizes text messages.") + @FunctionInfo(returnType = "keyword", description = "Categorizes text messages.") public Categorize( Source source, @Param(name = "field", type = { "text", "keyword" }, description = "Expression to categorize") Expression field @@ -88,43 +70,13 @@ public String getWriteableName() { @Override public boolean foldable() { - return field.foldable(); - } - - @Evaluator - static int process( - BytesRef v, - @Fixed(includeInToString = false, build = true) CategorizationAnalyzer analyzer, - @Fixed(includeInToString = false, build = true) TokenListCategorizer.CloseableTokenListCategorizer categorizer - ) { - String s = v.utf8ToString(); - try (TokenStream ts = analyzer.tokenStream("text", s)) { - return categorizer.computeCategory(ts, s.length(), 1).getId(); - } catch (IOException e) { - throw new RuntimeException(e); - } + // Categorize cannot be currently folded + return false; } @Override public ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { - return new CategorizeEvaluator.Factory( - source(), - toEvaluator.apply(field), - context -> new CategorizationAnalyzer( - // TODO(jan): get the correct analyzer in here, see CategorizationAnalyzerConfig::buildStandardCategorizationAnalyzer - new CustomAnalyzer( - TokenizerFactory.newFactory("whitespace", WhitespaceTokenizer::new), - new CharFilterFactory[0], - new TokenFilterFactory[0] - ), - true - ), - context -> new TokenListCategorizer.CloseableTokenListCategorizer( - new CategorizationBytesRefHash(new BytesRefHash(2048, context.bigArrays())), - CategorizationPartOfSpeechDictionary.getInstance(), - 0.70f - ) - ); + throw new UnsupportedOperationException("CATEGORIZE is only evaluated during aggregations"); } @Override @@ -134,11 +86,11 @@ protected TypeResolution resolveType() { @Override public DataType dataType() { - return DataType.INTEGER; + return DataType.KEYWORD; } @Override - public Expression replaceChildren(List newChildren) { + public Categorize replaceChildren(List newChildren) { return new Categorize(source(), newChildren.get(0)); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/GroupingWritables.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/GroupingWritables.java new file mode 100644 index 0000000000000..89b9036e97e3a --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/GroupingWritables.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.grouping; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; + +import java.util.List; + +public class GroupingWritables { + + public static List getNamedWriteables() { + return List.of(Bucket.ENTRY, Categorize.ENTRY); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlScalarFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlScalarFunction.java index 65985f234ac92..404ce7e3900c9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlScalarFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlScalarFunction.java @@ -7,57 +7,11 @@ package org.elasticsearch.xpack.esql.expression.function.scalar; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.function.scalar.ScalarFunction; -import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.FullTextPredicate; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; -import org.elasticsearch.xpack.esql.expression.function.grouping.Bucket; -import org.elasticsearch.xpack.esql.expression.function.grouping.Categorize; -import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Case; -import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Greatest; -import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Least; -import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateDiff; -import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateExtract; -import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateFormat; -import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateParse; -import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc; -import org.elasticsearch.xpack.esql.expression.function.scalar.date.Now; -import org.elasticsearch.xpack.esql.expression.function.scalar.ip.CIDRMatch; -import org.elasticsearch.xpack.esql.expression.function.scalar.ip.IpPrefix; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Atan2; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.E; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Hypot; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Log; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Pi; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Pow; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Round; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Tau; -import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; -import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.BinarySpatialFunction; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.BitLength; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.EndsWith; -import 
org.elasticsearch.xpack.esql.expression.function.scalar.string.Left; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Locate; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Repeat; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Replace; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Reverse; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Right; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Split; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.StartsWith; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Substring; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.ToLower; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.ToUpper; -import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.InsensitiveEquals; -import java.util.ArrayList; import java.util.List; /** @@ -71,56 +25,6 @@ *

    */ public abstract class EsqlScalarFunction extends ScalarFunction implements EvaluatorMapper { - public static List getNamedWriteables() { - List entries = new ArrayList<>(); - entries.add(And.ENTRY); - entries.add(Atan2.ENTRY); - entries.add(BitLength.ENTRY); - entries.add(Bucket.ENTRY); - entries.add(Case.ENTRY); - entries.add(Categorize.ENTRY); - entries.add(CIDRMatch.ENTRY); - entries.add(Coalesce.ENTRY); - entries.add(Concat.ENTRY); - entries.add(E.ENTRY); - entries.add(EndsWith.ENTRY); - entries.add(Greatest.ENTRY); - entries.add(Hypot.ENTRY); - entries.add(In.ENTRY); - entries.add(InsensitiveEquals.ENTRY); - entries.add(DateExtract.ENTRY); - entries.add(DateDiff.ENTRY); - entries.add(DateFormat.ENTRY); - entries.add(DateParse.ENTRY); - entries.add(DateTrunc.ENTRY); - entries.add(IpPrefix.ENTRY); - entries.add(Least.ENTRY); - entries.add(Left.ENTRY); - entries.add(Locate.ENTRY); - entries.add(Log.ENTRY); - entries.add(Now.ENTRY); - entries.add(Or.ENTRY); - entries.add(Pi.ENTRY); - entries.add(Pow.ENTRY); - entries.add(Right.ENTRY); - entries.add(Repeat.ENTRY); - entries.add(Replace.ENTRY); - entries.add(Reverse.ENTRY); - entries.add(Round.ENTRY); - entries.add(Split.ENTRY); - entries.add(Substring.ENTRY); - entries.add(StartsWith.ENTRY); - entries.add(Tau.ENTRY); - entries.add(ToLower.ENTRY); - entries.add(ToUpper.ENTRY); - entries.addAll(BinarySpatialFunction.getNamedWriteables()); - entries.addAll(EsqlArithmeticOperation.getNamedWriteables()); - entries.addAll(EsqlBinaryComparison.getNamedWriteables()); - entries.addAll(FullTextPredicate.getNamedWriteables()); - entries.addAll(UnaryScalarFunction.getNamedWriteables()); - return entries; - } - protected EsqlScalarFunction(Source source) { super(source); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ScalarFunctionWritables.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ScalarFunctionWritables.java new file mode 100644 index 0000000000000..192ca6c43e57d --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ScalarFunctionWritables.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; +import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; +import org.elasticsearch.xpack.esql.expression.function.grouping.GroupingWritables; +import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Case; +import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Greatest; +import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Least; +import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateDiff; +import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateExtract; +import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateFormat; +import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateParse; +import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc; +import org.elasticsearch.xpack.esql.expression.function.scalar.date.Now; +import org.elasticsearch.xpack.esql.expression.function.scalar.ip.CIDRMatch; +import org.elasticsearch.xpack.esql.expression.function.scalar.ip.IpPrefix; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Atan2; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.E; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Hypot; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Log; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Pi; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Pow; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Round; +import org.elasticsearch.xpack.esql.expression.function.scalar.math.Tau; +import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.BitLength; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.EndsWith; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Left; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Locate; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Repeat; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Replace; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Reverse; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Right; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Split; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.StartsWith; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Substring; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.ToLower; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.ToUpper; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.InsensitiveEquals; + +import java.util.ArrayList; +import java.util.List; + +public class ScalarFunctionWritables { + public static List getNamedWriteables() { + List entries = new ArrayList<>(); + entries.add(And.ENTRY); + entries.add(Atan2.ENTRY); + entries.add(BitLength.ENTRY); + entries.add(Case.ENTRY); + entries.add(CIDRMatch.ENTRY); 
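Every ENTRY added in this list pairs a wire name with a reader. To make the registration's purpose concrete, here is a sketch of the generic round trip these entries enable; the plain stream wrapper is a simplification (real ESQL plan serialization additionally wraps the streams in PlanStreamInput/PlanStreamOutput):

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.xpack.esql.core.expression.Expression;

import java.io.IOException;

class WireRoundTripSketch {
    // Serialize under the expression's registered name, then let the registry
    // resolve that name back to the matching ENTRY reader on the way in.
    static Expression roundTrip(Expression expr, NamedWriteableRegistry registry) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.writeNamedWriteable(expr);
            try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry)) {
                return in.readNamedWriteable(Expression.class);
            }
        }
    }
}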
+ entries.add(Coalesce.ENTRY); + entries.add(Concat.ENTRY); + entries.add(E.ENTRY); + entries.add(EndsWith.ENTRY); + entries.add(Greatest.ENTRY); + entries.add(Hypot.ENTRY); + entries.add(In.ENTRY); + entries.add(InsensitiveEquals.ENTRY); + entries.add(DateExtract.ENTRY); + entries.add(DateDiff.ENTRY); + entries.add(DateFormat.ENTRY); + entries.add(DateParse.ENTRY); + entries.add(DateTrunc.ENTRY); + entries.add(IpPrefix.ENTRY); + entries.add(Least.ENTRY); + entries.add(Left.ENTRY); + entries.add(Locate.ENTRY); + entries.add(Log.ENTRY); + entries.add(Now.ENTRY); + entries.add(Or.ENTRY); + entries.add(Pi.ENTRY); + entries.add(Pow.ENTRY); + entries.add(Right.ENTRY); + entries.add(Repeat.ENTRY); + entries.add(Replace.ENTRY); + entries.add(Reverse.ENTRY); + entries.add(Round.ENTRY); + entries.add(Split.ENTRY); + entries.add(Substring.ENTRY); + entries.add(StartsWith.ENTRY); + entries.add(Tau.ENTRY); + entries.add(ToLower.ENTRY); + entries.add(ToUpper.ENTRY); + + entries.addAll(GroupingWritables.getNamedWriteables()); + return entries; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java index 610fe1c5ea000..d2af110a5203f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java @@ -7,130 +7,20 @@ package org.elasticsearch.xpack.esql.expression.function.scalar; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; -import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; -import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; -import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNull; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.scalar.convert.FromBase64; -import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToBase64; -import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToBoolean; -import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianPoint; -import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianShape; -import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDateNanos; -import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDatetime; -import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDegrees; -import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble; -import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToGeoPoint; -import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToGeoShape; -import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToIP; -import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToInteger; -import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToLong; -import 
org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToRadians; -import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToString; -import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToUnsignedLong; -import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToVersion; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Abs; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Acos; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Asin; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Atan; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cbrt; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Ceil; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cos; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cosh; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Exp; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Floor; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Log10; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Signum; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Sin; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Sinh; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Sqrt; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Tan; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Tanh; -import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.AbstractMultivalueFunction; -import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StX; -import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StY; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.ByteLength; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Length; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.RLike; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.RTrim; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Space; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.Trim; -import org.elasticsearch.xpack.esql.expression.function.scalar.string.WildcardLike; -import org.elasticsearch.xpack.esql.expression.function.scalar.util.Delay; -import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Neg; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; -import java.util.List; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isNumeric; public abstract class UnaryScalarFunction extends EsqlScalarFunction { - public static List getNamedWriteables() { - List entries = new ArrayList<>(); - entries.add(Abs.ENTRY); - entries.add(Acos.ENTRY); - entries.add(Asin.ENTRY); - entries.add(Atan.ENTRY); - entries.add(ByteLength.ENTRY); - entries.add(Cbrt.ENTRY); - entries.add(Ceil.ENTRY); - entries.add(Cos.ENTRY); - entries.add(Cosh.ENTRY); - entries.add(Exp.ENTRY); - entries.add(Floor.ENTRY); - entries.add(FromBase64.ENTRY); - entries.add(IsNotNull.ENTRY); - entries.add(IsNull.ENTRY); - entries.add(Length.ENTRY); - entries.add(Log10.ENTRY); - entries.add(LTrim.ENTRY); - 
entries.add(Neg.ENTRY); - entries.add(Not.ENTRY); - entries.add(RLike.ENTRY); - entries.add(RTrim.ENTRY); - entries.add(Signum.ENTRY); - entries.add(Sin.ENTRY); - entries.add(Sinh.ENTRY); - entries.add(Space.ENTRY); - entries.add(Sqrt.ENTRY); - entries.add(StX.ENTRY); - entries.add(StY.ENTRY); - entries.add(Tan.ENTRY); - entries.add(Tanh.ENTRY); - entries.add(ToBase64.ENTRY); - entries.add(ToBoolean.ENTRY); - entries.add(ToCartesianPoint.ENTRY); - entries.add(ToDatetime.ENTRY); - entries.add(ToDateNanos.ENTRY); - entries.add(ToDegrees.ENTRY); - entries.add(ToDouble.ENTRY); - entries.add(ToGeoShape.ENTRY); - entries.add(ToCartesianShape.ENTRY); - entries.add(ToGeoPoint.ENTRY); - entries.add(ToIP.ENTRY); - entries.add(ToInteger.ENTRY); - entries.add(ToLong.ENTRY); - entries.add(ToRadians.ENTRY); - entries.add(ToString.ENTRY); - entries.add(ToUnsignedLong.ENTRY); - entries.add(ToVersion.ENTRY); - entries.add(Trim.ENTRY); - entries.add(WildcardLike.ENTRY); - entries.add(Delay.ENTRY); - entries.addAll(AbstractMultivalueFunction.getNamedWriteables()); - return entries; - } - protected final Expression field; public UnaryScalarFunction(Source source, Expression field) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java index 6e38d72500840..a35b67d7ac3fd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java @@ -62,7 +62,7 @@ public interface DateTruncFactoryProvider { protected static final ZoneId DEFAULT_TZ = ZoneOffset.UTC; @FunctionInfo( - returnType = "date", + returnType = { "date", "date_nanos" }, description = "Rounds down a date to the closest interval.", examples = { @Example(file = "date", tag = "docsDateTrunc"), @@ -83,7 +83,7 @@ public DateTrunc( type = { "date_period", "time_duration" }, description = "Interval; expressed using the timespan literal syntax." 
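For fixed-length intervals, the "rounds down a date to the closest interval" behavior documented for DATE_TRUNC above amounts to floor arithmetic on the underlying epoch value (millis for `date`, nanos for `date_nanos`); calendar-varying intervals such as months need calendar-aware rounding and are not captured here. An illustrative sketch, not the production code path:

class DateTruncSketch {
    // Floor an epoch-based timestamp to a fixed-length interval. Math.floorDiv
    // keeps pre-epoch (negative) timestamps rounding toward the past.
    static long truncate(long epochUnits, long intervalUnits) {
        return Math.floorDiv(epochUnits, intervalUnits) * intervalUnits;
    }
}

For example, truncate(t, 3_600_000L) rounds an epoch-millis value down to the start of its hour.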
) Expression interval, - @Param(name = "date", type = { "date" }, description = "Date expression") Expression field + @Param(name = "date", type = { "date", "date_nanos" }, description = "Date expression") Expression field ) { super(source, List.of(interval, field)); this.interval = interval; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java index 6a3b58728b192..a32761cfd9948 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.data.Block; @@ -22,7 +21,6 @@ import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import java.io.IOException; -import java.util.List; /** * Base class for functions that reduce multivalued fields into single valued fields. @@ -32,27 +30,6 @@ *

    */ public abstract class AbstractMultivalueFunction extends UnaryScalarFunction { - public static List getNamedWriteables() { - return List.of( - MvAppend.ENTRY, - MvAvg.ENTRY, - MvConcat.ENTRY, - MvCount.ENTRY, - MvDedupe.ENTRY, - MvFirst.ENTRY, - MvLast.ENTRY, - MvMax.ENTRY, - MvMedian.ENTRY, - MvMedianAbsoluteDeviation.ENTRY, - MvMin.ENTRY, - MvPercentile.ENTRY, - MvPSeriesWeightedSum.ENTRY, - MvSlice.ENTRY, - MvSort.ENTRY, - MvSum.ENTRY, - MvZip.ENTRY - ); - } protected AbstractMultivalueFunction(Source source, Expression field) { super(source, field); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFunctionWritables.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFunctionWritables.java new file mode 100644 index 0000000000000..7f8fcd910ad6d --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFunctionWritables.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; + +import java.util.List; + +public class MvFunctionWritables { + public static List getNamedWriteables() { + return List.of( + MvAppend.ENTRY, + MvAvg.ENTRY, + MvConcat.ENTRY, + MvCount.ENTRY, + MvDedupe.ENTRY, + MvFirst.ENTRY, + MvLast.ENTRY, + MvMax.ENTRY, + MvMedian.ENTRY, + MvMedianAbsoluteDeviation.ENTRY, + MvMin.ENTRY, + MvPercentile.ENTRY, + MvPSeriesWeightedSum.ENTRY, + MvSlice.ENTRY, + MvSort.ENTRY, + MvSum.ENTRY, + MvZip.ENTRY + ); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java index 46538b77edc74..eccc7ee4672c9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java @@ -112,7 +112,7 @@ * {@link org.elasticsearch.common.io.stream.NamedWriteable#writeTo}, * and a deserializing constructor. Then add an {@link org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry} * constant and register it. To register it, look for a method like - * {@link org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction#getNamedWriteables()} + * {@link org.elasticsearch.xpack.esql.expression.function.scalar.ScalarFunctionWritables#getNamedWriteables()} * in your function's class hierarchy. Keep going up until you hit a function with that name. * Then add your new "ENTRY" constant to the list it returns. *
  • diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java index 8839244e6c601..4d08b0e9687ec 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/BinarySpatialFunction.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.spatial; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.geometry.Geometry; @@ -22,7 +21,6 @@ import org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions; import java.io.IOException; -import java.util.List; import java.util.Objects; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; @@ -40,9 +38,6 @@ * and of compatible CRS. For example geo_point and geo_shape can be compared, but not geo_point and cartesian_point. */ public abstract class BinarySpatialFunction extends BinaryScalarFunction implements SpatialEvaluatorFactory.SpatialSourceResolution { - public static List getNamedWriteables() { - return List.of(SpatialContains.ENTRY, SpatialDisjoint.ENTRY, SpatialIntersects.ENTRY, SpatialWithin.ENTRY, StDistance.ENTRY); - } private final SpatialTypeResolver spatialTypeResolver; private SpatialCrsType crsType; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java index 62201bcfa858d..74394d796855f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; @@ -21,7 +20,6 @@ import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import java.io.IOException; -import java.util.List; import static org.elasticsearch.common.logging.LoggerMessageFormat.format; import static org.elasticsearch.xpack.esql.core.type.DataType.DOUBLE; @@ -31,9 +29,6 @@ import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.commonType; public abstract class EsqlArithmeticOperation extends ArithmeticOperation implements EvaluatorMapper { - public static List getNamedWriteables() { - return List.of(Add.ENTRY, Div.ENTRY, Mod.ENTRY, Mul.ENTRY, Sub.ENTRY); - } /** * The only role of this enum is to fit the super constructor that expects a BinaryOperation which is diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparison.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparison.java index db771a6354883..cbbf87fb6c4cb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparison.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparison.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -25,7 +24,6 @@ import java.io.IOException; import java.time.ZoneId; -import java.util.List; import java.util.Map; import static org.elasticsearch.common.logging.LoggerMessageFormat.format; @@ -33,9 +31,6 @@ import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.commonType; public abstract class EsqlBinaryComparison extends BinaryComparison implements EvaluatorMapper { - public static List getNamedWriteables() { - return List.of(Equals.ENTRY, GreaterThan.ENTRY, GreaterThanOrEqual.ENTRY, LessThan.ENTRY, LessThanOrEqual.ENTRY, NotEquals.ENTRY); - } private final Map evaluatorMap; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThan.java index 56ade3982e0d8..3ae7bd93092ef 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThan.java @@ -53,12 +53,12 @@ public LessThan( Source source, @Param( name = "lhs", - type = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "unsigned_long", "version" }, + type = { "boolean", "date_nanos", "date", "double", "integer", "ip", "keyword", "long", "text", "unsigned_long", "version" }, description = "An expression." ) Expression left, @Param( name = "rhs", - type = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "unsigned_long", "version" }, + type = { "boolean", "date_nanos", "date", "double", "integer", "ip", "keyword", "long", "text", "unsigned_long", "version" }, description = "An expression." ) Expression right ) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/IndexResolution.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/IndexResolution.java index b2eaefcf09d65..88366bbf9a7c3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/IndexResolution.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/IndexResolution.java @@ -12,22 +12,37 @@ import java.util.Collections; import java.util.Map; import java.util.Objects; +import java.util.Set; public final class IndexResolution { - public static IndexResolution valid(EsIndex index, Map unavailableClusters) { + /** + * @param index EsIndex encapsulating requested index expression, resolved mappings and index modes from field-caps. + * @param resolvedIndices Set of concrete indices resolved by field-caps. (This information is not always present in the EsIndex). 
+ * @param unavailableClusters Remote clusters that could not be contacted during planning + * @return valid IndexResolution + */ + public static IndexResolution valid( + EsIndex index, + Set resolvedIndices, + Map unavailableClusters + ) { Objects.requireNonNull(index, "index must not be null if it was found"); + Objects.requireNonNull(resolvedIndices, "resolvedIndices must not be null"); Objects.requireNonNull(unavailableClusters, "unavailableClusters must not be null"); - return new IndexResolution(index, null, unavailableClusters); + return new IndexResolution(index, null, resolvedIndices, unavailableClusters); } + /** + * Use this method only if the set of concrete resolved indices is the same as EsIndex#concreteIndices(). + */ public static IndexResolution valid(EsIndex index) { - return valid(index, Collections.emptyMap()); + return valid(index, index.concreteIndices(), Collections.emptyMap()); } public static IndexResolution invalid(String invalid) { Objects.requireNonNull(invalid, "invalid must not be null to signal that the index is invalid"); - return new IndexResolution(null, invalid, Collections.emptyMap()); + return new IndexResolution(null, invalid, Collections.emptySet(), Collections.emptyMap()); } public static IndexResolution notFound(String name) { @@ -39,12 +54,20 @@ public static IndexResolution notFound(String name) { @Nullable private final String invalid; + // all indices found by field-caps + private final Set resolvedIndices; // remote clusters included in the user's index expression that could not be connected to private final Map unavailableClusters; - private IndexResolution(EsIndex index, @Nullable String invalid, Map unavailableClusters) { + private IndexResolution( + EsIndex index, + @Nullable String invalid, + Set resolvedIndices, + Map unavailableClusters + ) { this.index = index; this.invalid = invalid; + this.resolvedIndices = resolvedIndices; this.unavailableClusters = unavailableClusters; } @@ -64,8 +87,8 @@ public EsIndex get() { } /** - * Is the index valid for use with ql? Returns {@code false} if the - * index wasn't found. + * Is the index valid for use with ql? + * @return {@code false} if the index wasn't found. */ public boolean isValid() { return invalid == null; @@ -75,10 +98,17 @@ public boolean isValid() { * @return Map of unavailable clusters (could not be connected to during field-caps query). Key of map is cluster alias, * value is the {@link FieldCapabilitiesFailure} describing the issue. */ - public Map getUnavailableClusters() { + public Map unavailableClusters() { return unavailableClusters; } + /** + * @return all indices found by field-caps (regardless of whether they had any mappings) + */ + public Set resolvedIndices() { + return resolvedIndices; + } + @Override public boolean equals(Object obj) { if (obj == null || obj.getClass() != getClass()) { @@ -87,16 +117,29 @@ public boolean equals(Object obj) { IndexResolution other = (IndexResolution) obj; return Objects.equals(index, other.index) && Objects.equals(invalid, other.invalid) + && Objects.equals(resolvedIndices, other.resolvedIndices) && Objects.equals(unavailableClusters, other.unavailableClusters); } @Override public int hashCode() { - return Objects.hash(index, invalid, unavailableClusters); + return Objects.hash(index, invalid, resolvedIndices, unavailableClusters); } @Override public String toString() { - return invalid != null ? invalid : index.name(); + return invalid != null + ? 
invalid + : "IndexResolution{" + + "index=" + + index + + ", invalid='" + + invalid + + '\'' + + ", resolvedIndices=" + + resolvedIndices + + ", unavailableClusters=" + + unavailableClusters + + '}'; } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java index 1e1cc3b86a9d5..47e5b9acfbf9d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java @@ -29,6 +29,7 @@ import org.elasticsearch.xpack.esql.Column; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.NameId; +import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.session.Configuration; @@ -160,7 +161,7 @@ public Block[] readCachedBlockArray() throws IOException { @Override public String sourceText() { - return configuration.query(); + return configuration == null ? Source.EMPTY.text() : configuration.query(); } static void throwOnNullOptionalRead(Class type) throws IOException { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index a0e257d1a8953..a5f97cf961378 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.esql.optimizer.rules.logical.CombineProjections; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ConstantFolding; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ConvertStringToByteRef; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ExtractAggregateCommonFilter; import org.elasticsearch.xpack.esql.optimizer.rules.logical.FoldNull; import org.elasticsearch.xpack.esql.optimizer.rules.logical.LiteralsOnTheRight; import org.elasticsearch.xpack.esql.optimizer.rules.logical.PartiallyFoldCase; @@ -46,6 +47,7 @@ import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceLimitAndSortAsTopN; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceOrderByExpressionWithEval; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceRegexMatch; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceRowAsLocalRelation; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceStatsFilteredAggWithEval; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceTrivialTypeConversions; import org.elasticsearch.xpack.esql.optimizer.rules.logical.SetAsOptimized; @@ -124,8 +126,9 @@ protected static Batch substitutions() { "Substitutions", Limiter.ONCE, new SubstituteSurrogatePlans(), - // translate filtered expressions into aggregate with filters - can't use surrogate expressions because it was - // retrofitted for constant folding - this needs to be fixed + // Translate filtered expressions into aggregate with filters - can't use surrogate expressions because it was + // retrofitted for constant folding - this needs to be fixed. 
+ // Needs to occur before ReplaceAggregateAggExpressionWithEval, which will update the functions, losing the filter. new SubstituteFilteredExpression(), new RemoveStatsOverride(), // first extract nested expressions inside aggs @@ -170,8 +173,10 @@ protected static Batch operators() { new BooleanFunctionEqualsElimination(), new CombineBinaryComparisons(), new CombineDisjunctions(), + // TODO: bifunction can now (since we now have just one data types set) be pushed into the rule new SimplifyComparisonsArithmetics(DataType::areCompatible), new ReplaceStatsFilteredAggWithEval(), + new ExtractAggregateCommonFilter(), // prune/elimination new PruneFilters(), new PruneColumns(), @@ -188,6 +193,6 @@ protected static Batch operators() { } protected static Batch cleanup() { - return new Batch<>("Clean Up", new ReplaceLimitAndSortAsTopN()); + return new Batch<>("Clean Up", new ReplaceLimitAndSortAsTopN(), new ReplaceRowAsLocalRelation()); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalOptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalOptimizerRules.java index 482a89b50c865..ee192c2420da8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalOptimizerRules.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalOptimizerRules.java @@ -7,8 +7,6 @@ package org.elasticsearch.xpack.esql.optimizer; -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.util.ReflectionUtils; import org.elasticsearch.xpack.esql.optimizer.rules.logical.OptimizerRules.TransformDirection; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.rule.ParameterizedRule; @@ -62,34 +60,4 @@ public final PhysicalPlan apply(PhysicalPlan plan) { protected abstract PhysicalPlan rule(SubPlan plan); } - - public abstract static class OptimizerExpressionRule extends Rule { - - private final TransformDirection direction; - // overriding type token which returns the correct class but does an uncheck cast to LogicalPlan due to its generic bound - // a proper solution is to wrap the Expression rule into a Plan rule but that would affect the rule declaration - // so instead this is hacked here - private final Class expressionTypeToken = ReflectionUtils.detectSuperTypeForRuleLike(getClass()); - - public OptimizerExpressionRule(TransformDirection direction) { - this.direction = direction; - } - - @Override - public final PhysicalPlan apply(PhysicalPlan plan) { - return direction == TransformDirection.DOWN - ? 
plan.transformExpressionsDown(expressionTypeToken, this::rule) - : plan.transformExpressionsUp(expressionTypeToken, this::rule); - } - - protected PhysicalPlan rule(PhysicalPlan plan) { - return plan; - } - - protected abstract Expression rule(E e); - - public Class expressionToken() { - return expressionTypeToken; - } - } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineProjections.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineProjections.java index 1c256012baeb0..be7096538fb9a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineProjections.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/CombineProjections.java @@ -15,6 +15,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.expression.function.grouping.Categorize; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.Project; @@ -61,12 +62,15 @@ protected LogicalPlan rule(UnaryPlan plan) { if (plan instanceof Aggregate a) { if (child instanceof Project p) { var groupings = a.groupings(); - List groupingAttrs = new ArrayList<>(a.groupings().size()); + List groupingAttrs = new ArrayList<>(a.groupings().size()); for (Expression grouping : groupings) { if (grouping instanceof Attribute attribute) { groupingAttrs.add(attribute); + } else if (grouping instanceof Alias as && as.child() instanceof Categorize) { + groupingAttrs.add(as); } else { - // After applying ReplaceAggregateNestedExpressionWithEval, groupings can only contain attributes. + // After applying ReplaceAggregateNestedExpressionWithEval, + // groupings (except Categorize) can only contain attributes. throw new EsqlIllegalArgumentException("Expected an Attribute, got {}", grouping); } } @@ -137,23 +141,33 @@ private static List combineProjections(List combineUpperGroupingsAndLowerProjections( - List upperGroupings, + List upperGroupings, List lowerProjections ) { // Collect the alias map for resolving the source (f1 = 1, f2 = f1, etc..) - AttributeMap aliases = new AttributeMap<>(); + AttributeMap aliases = new AttributeMap<>(); for (NamedExpression ne : lowerProjections) { - // Projections are just aliases for attributes, so casting is safe. - aliases.put(ne.toAttribute(), (Attribute) Alias.unwrap(ne)); + // record the alias + aliases.put(ne.toAttribute(), Alias.unwrap(ne)); } - // Replace any matching attribute directly with the aliased attribute from the projection. - AttributeSet replaced = new AttributeSet(); - for (Attribute attr : upperGroupings) { - // All substitutions happen before; groupings must be attributes at this point. - replaced.add(aliases.resolve(attr, attr)); + AttributeSet seen = new AttributeSet(); + List replaced = new ArrayList<>(); + for (NamedExpression ne : upperGroupings) { + // Duplicated attributes are ignored. 
+ if (ne instanceof Attribute attribute) { + var newExpression = aliases.resolve(attribute, attribute); + if (newExpression instanceof Attribute newAttribute && seen.add(newAttribute) == false) { + // Already seen, skip + continue; + } + replaced.add(newExpression); + } else { + // For grouping functions, this will replace nested properties too + replaced.add(ne.transformUp(Attribute.class, a -> aliases.resolve(a, a))); + } } - return new ArrayList<>(replaced); + return replaced; } /** diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ExtractAggregateCommonFilter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ExtractAggregateCommonFilter.java new file mode 100644 index 0000000000000..f00a8103f913e --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ExtractAggregateCommonFilter.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.optimizer.rules.logical; + +import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.esql.plan.logical.Aggregate; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; + +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.xpack.esql.core.expression.predicate.Predicates.extractCommon; + +/** + * Extract a per-function expression filter applied to all the aggs as a query {@link Filter}, when no groups are provided. + *
<p>
+ *     Example:
+ *     <pre>
+ *         ... | STATS MIN(a) WHERE b > 0, MIN(c) WHERE b > 0 | ...
+ *         =>
+ *         ... | WHERE b > 0 | STATS MIN(a), MIN(c) | ...
+ *     </pre>
    + */ +public final class ExtractAggregateCommonFilter extends OptimizerRules.OptimizerRule { + public ExtractAggregateCommonFilter() { + super(OptimizerRules.TransformDirection.UP); + } + + @Override + protected LogicalPlan rule(Aggregate aggregate) { + if (aggregate.groupings().isEmpty() == false) { + return aggregate; // no optimization for grouped stats + } + + // collect all filters from the agg functions + List filters = new ArrayList<>(aggregate.aggregates().size()); + for (NamedExpression ne : aggregate.aggregates()) { + if (ne instanceof Alias alias && alias.child() instanceof AggregateFunction aggFunction && aggFunction.hasFilter()) { + filters.add(aggFunction.filter()); + } else { + return aggregate; // (at least one) agg function has no filter -- skip optimization + } + } + + // extract common filters + var common = extractCommon(filters); + if (common.v1() == null) { // no common filter + return aggregate; + } + + // replace agg functions' filters with trimmed ones + var newFilters = common.v2(); + List newAggs = new ArrayList<>(aggregate.aggregates().size()); + for (int i = 0; i < aggregate.aggregates().size(); i++) { + var alias = (Alias) aggregate.aggregates().get(i); + var newChild = ((AggregateFunction) alias.child()).withFilter(newFilters.get(i)); + newAggs.add(alias.replaceChild(newChild)); + } + + // build the new agg on top of extracted filter + return new Aggregate( + aggregate.source(), + new Filter(aggregate.source(), aggregate.child(), common.v1()), + aggregate.aggregateType(), + aggregate.groupings(), + newAggs + ); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/FoldNull.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/FoldNull.java index 0f08cd66444a3..638fa1b8db456 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/FoldNull.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/FoldNull.java @@ -13,6 +13,7 @@ import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.Nullability; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.esql.expression.function.grouping.Categorize; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; public class FoldNull extends OptimizerRules.OptimizerExpressionRule { @@ -42,6 +43,7 @@ public Expression rule(Expression e) { } } else if (e instanceof Alias == false && e.nullable() == Nullability.TRUE + && e instanceof Categorize == false && Expressions.anyMatch(e.children(), Expressions::isNull)) { return Literal.of(e, null); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java index 153efa5b5c233..fb9d3f7e2f91e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineLimits.java @@ -18,7 +18,7 @@ import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.logical.join.Join; -import 
org.elasticsearch.xpack.esql.plan.logical.join.JoinType; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; public final class PushDownAndCombineLimits extends OptimizerRules.OptimizerRule { @@ -63,7 +63,7 @@ public LogicalPlan rule(Limit limit) { } } } else if (limit.child() instanceof Join join) { - if (join.config().type() == JoinType.LEFT && join.right() instanceof LocalRelation) { + if (join.config().type() == JoinTypes.LEFT && join.right() instanceof LocalRelation) { // This is a hash join from something like a lookup. return join.replaceChildren(limit.replaceChild(join.left()), join.right()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceAggregateNestedExpressionWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceAggregateNestedExpressionWithEval.java index 173940af19935..985e68252a1f9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceAggregateNestedExpressionWithEval.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceAggregateNestedExpressionWithEval.java @@ -13,6 +13,7 @@ import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.esql.expression.function.grouping.Categorize; import org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Eval; @@ -46,15 +47,29 @@ protected LogicalPlan rule(Aggregate aggregate) { // start with the groupings since the aggs might duplicate it for (int i = 0, s = newGroupings.size(); i < s; i++) { Expression g = newGroupings.get(i); - // move the alias into an eval and replace it with its attribute + // Move the alias into an eval and replace it with its attribute. + // Exception: Categorize is internal to the aggregation and remains in the groupings. We move its child expression into an eval. 
if (g instanceof Alias as) { - groupingChanged = true; - var attr = as.toAttribute(); - evals.add(as); - evalNames.put(as.name(), attr); - newGroupings.set(i, attr); - if (as.child() instanceof GroupingFunction gf) { - groupingAttributes.put(gf, attr); + if (as.child() instanceof Categorize cat) { + if (cat.field() instanceof Attribute == false) { + groupingChanged = true; + var fieldAs = new Alias(as.source(), as.name(), cat.field(), null, true); + var fieldAttr = fieldAs.toAttribute(); + evals.add(fieldAs); + evalNames.put(fieldAs.name(), fieldAttr); + Categorize replacement = cat.replaceChildren(List.of(fieldAttr)); + newGroupings.set(i, as.replaceChild(replacement)); + groupingAttributes.put(cat, fieldAttr); + } + } else { + groupingChanged = true; + var attr = as.toAttribute(); + evals.add(as); + evalNames.put(as.name(), attr); + newGroupings.set(i, attr); + if (as.child() instanceof GroupingFunction gf) { + groupingAttributes.put(gf, attr); + } } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceRowAsLocalRelation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceRowAsLocalRelation.java new file mode 100644 index 0000000000000..eebeb1dc14f48 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceRowAsLocalRelation.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.optimizer.rules.logical; + +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.Row; +import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; +import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; + +import java.util.ArrayList; +import java.util.List; + +public final class ReplaceRowAsLocalRelation extends OptimizerRules.OptimizerRule { + + @Override + protected LogicalPlan rule(Row row) { + var fields = row.fields(); + List values = new ArrayList<>(fields.size()); + fields.forEach(f -> values.add(f.child().fold())); + var blocks = BlockUtils.fromListRow(PlannerUtils.NON_BREAKING_BLOCK_FACTORY, values); + return new LocalRelation(row.source(), row.output(), LocalSupplier.of(blocks)); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/ProjectAwayColumns.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/ProjectAwayColumns.java index 9f5b35e1eb9fb..d73aaee655860 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/ProjectAwayColumns.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/ProjectAwayColumns.java @@ -73,8 +73,7 @@ public PhysicalPlan apply(PhysicalPlan plan) { Source.EMPTY, new Project(logicalFragment.source(), logicalFragment, output), fragmentExec.esFilter(), - fragmentExec.estimatedRowSize(), - fragmentExec.reducer() + fragmentExec.estimatedRowSize() ); return new ExchangeExec(exec.source(), output, exec.inBetweenAggs(), newChild); } diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java index 1c20f765c6d51..cafe3726f92ac 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java @@ -12,15 +12,18 @@ import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; import org.elasticsearch.xpack.esql.core.expression.TypedAttribute; +import org.elasticsearch.xpack.esql.expression.function.grouping.Categorize; import org.elasticsearch.xpack.esql.optimizer.rules.physical.ProjectAwayColumns; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; import org.elasticsearch.xpack.esql.plan.physical.LeafExec; +import org.elasticsearch.xpack.esql.plan.physical.LookupJoinExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.rule.Rule; import java.util.ArrayList; +import java.util.Collections; import java.util.LinkedHashSet; import java.util.LinkedList; import java.util.List; @@ -56,11 +59,17 @@ public PhysicalPlan apply(PhysicalPlan plan) { * make sure the fields are loaded for the standard hash aggregator. */ if (p instanceof AggregateExec agg && agg.groupings().size() == 1) { - var leaves = new LinkedList<>(); - // TODO: this seems out of place - agg.aggregates().stream().filter(a -> agg.groupings().contains(a) == false).forEach(a -> leaves.addAll(a.collectLeaves())); - var remove = agg.groupings().stream().filter(g -> leaves.contains(g) == false).toList(); - missing.removeAll(Expressions.references(remove)); + // CATEGORIZE requires the standard hash aggregator as well. + if (agg.groupings().get(0).anyMatch(e -> e instanceof Categorize) == false) { + var leaves = new LinkedList<>(); + // TODO: this seems out of place + agg.aggregates() + .stream() + .filter(a -> agg.groupings().contains(a) == false) + .forEach(a -> leaves.addAll(a.collectLeaves())); + var remove = agg.groupings().stream().filter(g -> leaves.contains(g) == false).toList(); + missing.removeAll(Expressions.references(remove)); + } } // add extractor @@ -93,9 +102,17 @@ public PhysicalPlan apply(PhysicalPlan plan) { private static Set missingAttributes(PhysicalPlan p) { var missing = new LinkedHashSet(); - var input = p.inputSet(); + var inputSet = p.inputSet(); + + // TODO: We need to extract whatever fields are missing from the left hand side. 
+ // skip the lookup join since the right side is always materialized and a projection + if (p instanceof LookupJoinExec join) { + return Collections.emptySet(); + } + var input = inputSet; // collect field attributes used inside expressions + // TODO: Rather than going over all expressions manually, this should just call .references() p.forEachExpression(TypedAttribute.class, f -> { if (f instanceof FieldAttribute || f instanceof MetadataAttribute) { if (input.contains(f) == false) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/LucenePushdownPredicates.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/LucenePushdownPredicates.java index feb8717f007b7..8046d6bc56607 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/LucenePushdownPredicates.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/LucenePushdownPredicates.java @@ -9,6 +9,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.stats.SearchStats; @@ -59,6 +60,10 @@ default boolean isPushableFieldAttribute(Expression exp) { return false; } + default boolean isPushableMetadataAttribute(Expression exp) { + return exp instanceof MetadataAttribute ma && ma.name().equals(MetadataAttribute.SCORE); + } + /** * The default implementation of this has no access to SearchStats, so it can only make decisions based on the FieldAttribute itself. * In particular, it assumes TEXT fields have no exact subfields (underlying keyword field), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushFiltersToSource.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushFiltersToSource.java index f01e7c4b1f3a6..3d6c35e914294 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushFiltersToSource.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushFiltersToSource.java @@ -19,7 +19,6 @@ import org.elasticsearch.xpack.esql.core.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; import org.elasticsearch.xpack.esql.core.expression.predicate.Range; -import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.StringQueryPredicate; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.BinaryLogic; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; import org.elasticsearch.xpack.esql.core.expression.predicate.nulls.IsNotNull; @@ -31,8 +30,8 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.CollectionUtils; import org.elasticsearch.xpack.esql.core.util.Queries; +import org.elasticsearch.xpack.esql.expression.function.fulltext.FullTextFunction; import org.elasticsearch.xpack.esql.expression.function.fulltext.Match; -import org.elasticsearch.xpack.esql.expression.function.fulltext.QueryString; import org.elasticsearch.xpack.esql.expression.function.scalar.ip.CIDRMatch; import 
org.elasticsearch.xpack.esql.expression.function.scalar.spatial.BinarySpatialFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesFunction; @@ -253,12 +252,10 @@ static boolean canPushToSource(Expression exp, LucenePushdownPredicates lucenePu && Expressions.foldable(cidrMatch.matches()); } else if (exp instanceof SpatialRelatesFunction spatial) { return canPushSpatialFunctionToSource(spatial, lucenePushdownPredicates); - } else if (exp instanceof StringQueryPredicate) { - return true; - } else if (exp instanceof QueryString) { - return true; } else if (exp instanceof Match mf) { return mf.field() instanceof FieldAttribute && DataType.isString(mf.field().dataType()); + } else if (exp instanceof FullTextFunction) { + return true; } return false; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushStatsToSource.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushStatsToSource.java index b0b86b43cd162..21bc360404628 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushStatsToSource.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushStatsToSource.java @@ -16,6 +16,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.util.Queries; import org.elasticsearch.xpack.esql.core.util.StringUtils; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; import org.elasticsearch.xpack.esql.optimizer.LocalPhysicalOptimizerContext; @@ -25,12 +26,15 @@ import org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.planner.AbstractPhysicalOperationProviders; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; import java.util.ArrayList; import java.util.List; +import static java.util.Arrays.asList; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.esql.optimizer.rules.physical.local.PushFiltersToSource.canPushToSource; import static org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec.StatsType.COUNT; /** @@ -98,6 +102,13 @@ private Tuple, List> pushableStats( } } if (fieldName != null) { + if (count.hasFilter()) { + if (canPushToSource(count.filter()) == false) { + return null; // can't push down + } + var countFilter = PlannerUtils.TRANSLATOR_HANDLER.asQuery(count.filter()); + query = Queries.combine(Queries.Clause.MUST, asList(countFilter.asBuilder(), query)); + } return new EsStatsQueryExec.Stat(fieldName, COUNT, query); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushTopNToSource.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushTopNToSource.java index 925e144b69fcc..2b531257e594a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushTopNToSource.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushTopNToSource.java @@ -14,6 +14,7 @@ import org.elasticsearch.xpack.esql.core.expression.AttributeMap; import 
org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.expression.Order; @@ -57,6 +58,7 @@ * */ public class PushTopNToSource extends PhysicalOptimizerRules.ParameterizedOptimizerRule { + @Override protected PhysicalPlan rule(TopNExec topNExec, LocalPhysicalOptimizerContext ctx) { Pushable pushable = evaluatePushable(topNExec, LucenePushdownPredicates.from(ctx.searchStats())); @@ -155,6 +157,8 @@ && canPushDownOrders(topNExec.order(), lucenePushdownPredicates)) { order.nullsPosition() ) ); + } else if (lucenePushdownPredicates.isPushableMetadataAttribute(order.child())) { + pushableSorts.add(new EsQueryExec.ScoreSort(order.direction())); } else if (order.child() instanceof ReferenceAttribute referenceAttribute) { Attribute resolvedAttribute = aliasReplacedBy.resolve(referenceAttribute, referenceAttribute); if (distances.containsKey(resolvedAttribute.id())) { @@ -192,13 +196,23 @@ && canPushDownOrders(topNExec.order(), lucenePushdownPredicates)) { private static boolean canPushDownOrders(List orders, LucenePushdownPredicates lucenePushdownPredicates) { // allow only exact FieldAttributes (no expressions) for sorting - return orders.stream().allMatch(o -> lucenePushdownPredicates.isPushableFieldAttribute(o.child())); + return orders.stream() + .allMatch( + o -> lucenePushdownPredicates.isPushableFieldAttribute(o.child()) + || lucenePushdownPredicates.isPushableMetadataAttribute(o.child()) + ); } private static List buildFieldSorts(List orders) { List sorts = new ArrayList<>(orders.size()); for (Order o : orders) { - sorts.add(new EsQueryExec.FieldSort(((FieldAttribute) o.child()).exactAttribute(), o.direction(), o.nullsPosition())); + if (o.child() instanceof FieldAttribute fa) { + sorts.add(new EsQueryExec.FieldSort(fa.exactAttribute(), o.direction(), o.nullsPosition())); + } else if (o.child() instanceof MetadataAttribute ma && MetadataAttribute.SCORE.equals(ma.name())) { + sorts.add(new EsQueryExec.ScoreSort(o.direction())); + } else { + assert false : "unexpected ordering on expression type " + o.child().getClass(); + } } return sorts; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/ReplaceSourceAttributes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/ReplaceSourceAttributes.java index 74ea6f99e5e59..11e386ddd046c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/ReplaceSourceAttributes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/ReplaceSourceAttributes.java @@ -16,6 +16,7 @@ import org.elasticsearch.xpack.esql.plan.physical.EsSourceExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; +import java.util.ArrayList; import java.util.List; import static org.elasticsearch.xpack.esql.optimizer.rules.logical.OptimizerRules.TransformDirection.UP; @@ -29,6 +30,8 @@ public ReplaceSourceAttributes() { @Override protected PhysicalPlan rule(EsSourceExec plan) { var docId = new FieldAttribute(plan.source(), EsQueryExec.DOC_ID_FIELD.getName(), EsQueryExec.DOC_ID_FIELD); + final List attributes = new ArrayList<>(); + attributes.add(docId); if 
(plan.indexMode() == IndexMode.TIME_SERIES) { Attribute tsid = null, timestamp = null; for (Attribute attr : plan.output()) { @@ -42,9 +45,14 @@ protected PhysicalPlan rule(EsSourceExec plan) { if (tsid == null || timestamp == null) { throw new IllegalStateException("_tsid or @timestamp are missing from the time-series source"); } - return new EsQueryExec(plan.source(), plan.index(), plan.indexMode(), List.of(docId, tsid, timestamp), plan.query()); - } else { - return new EsQueryExec(plan.source(), plan.index(), plan.indexMode(), List.of(docId), plan.query()); + attributes.add(tsid); + attributes.add(timestamp); } + plan.output().forEach(attr -> { + if (attr instanceof MetadataAttribute ma && ma.name().equals(MetadataAttribute.SCORE)) { + attributes.add(ma); + } + }); + return new EsQueryExec(plan.source(), plan.index(), plan.indexMode(), attributes, plan.query()); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.interp b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.interp index 8f9c5956dddd5..c83fdbe8847a9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.interp +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.interp @@ -23,7 +23,11 @@ null null null null -':' +null +null +null +null +null '|' null null @@ -33,6 +37,7 @@ null 'asc' '=' '::' +':' ',' 'desc' '.' @@ -113,6 +118,10 @@ null null null null +'USING' +null +null +null null null null @@ -141,11 +150,15 @@ WHERE DEV_INLINESTATS DEV_LOOKUP DEV_METRICS +DEV_JOIN +DEV_JOIN_FULL +DEV_JOIN_LEFT +DEV_JOIN_RIGHT +DEV_JOIN_LOOKUP UNKNOWN_CMD LINE_COMMENT MULTILINE_COMMENT WS -COLON PIPE QUOTED_STRING INTEGER_LITERAL @@ -155,6 +168,7 @@ AND ASC ASSIGN CAST_OP +COLON COMMA DESC DOT @@ -235,6 +249,10 @@ LOOKUP_WS LOOKUP_FIELD_LINE_COMMENT LOOKUP_FIELD_MULTILINE_COMMENT LOOKUP_FIELD_WS +USING +JOIN_LINE_COMMENT +JOIN_MULTILINE_COMMENT +JOIN_WS METRICS_LINE_COMMENT METRICS_MULTILINE_COMMENT METRICS_WS @@ -262,11 +280,15 @@ WHERE DEV_INLINESTATS DEV_LOOKUP DEV_METRICS +DEV_JOIN +DEV_JOIN_FULL +DEV_JOIN_LEFT +DEV_JOIN_RIGHT +DEV_JOIN_LOOKUP UNKNOWN_CMD LINE_COMMENT MULTILINE_COMMENT WS -COLON PIPE DIGIT LETTER @@ -286,6 +308,7 @@ AND ASC ASSIGN CAST_OP +COLON COMMA DESC DOT @@ -316,7 +339,6 @@ MINUS ASTERISK SLASH PERCENT -EXPRESSION_COLON NESTED_WHERE NAMED_OR_POSITIONAL_PARAM OPENING_BRACKET @@ -427,6 +449,16 @@ LOOKUP_FIELD_ID_PATTERN LOOKUP_FIELD_LINE_COMMENT LOOKUP_FIELD_MULTILINE_COMMENT LOOKUP_FIELD_WS +JOIN_PIPE +JOIN_JOIN +JOIN_AS +JOIN_ON +USING +JOIN_UNQUOTED_IDENTIFER +JOIN_QUOTED_IDENTIFIER +JOIN_LINE_COMMENT +JOIN_MULTILINE_COMMENT +JOIN_WS METRICS_PIPE METRICS_UNQUOTED_SOURCE METRICS_QUOTED_SOURCE @@ -461,8 +493,9 @@ SHOW_MODE SETTING_MODE LOOKUP_MODE LOOKUP_FIELD_MODE +JOIN_MODE METRICS_MODE CLOSING_METRICS_MODE atn: -[4, 0, 119, 1484, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 
39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, 57, 7, 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, 7, 62, 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 2, 66, 7, 66, 2, 67, 7, 67, 2, 68, 7, 68, 2, 69, 7, 69, 2, 70, 7, 70, 2, 71, 7, 71, 2, 72, 7, 72, 2, 73, 7, 73, 2, 74, 7, 74, 2, 75, 7, 75, 2, 76, 7, 76, 2, 77, 7, 77, 2, 78, 7, 78, 2, 79, 7, 79, 2, 80, 7, 80, 2, 81, 7, 81, 2, 82, 7, 82, 2, 83, 7, 83, 2, 84, 7, 84, 2, 85, 7, 85, 2, 86, 7, 86, 2, 87, 7, 87, 2, 88, 7, 88, 2, 89, 7, 89, 2, 90, 7, 90, 2, 91, 7, 91, 2, 92, 7, 92, 2, 93, 7, 93, 2, 94, 7, 94, 2, 95, 7, 95, 2, 96, 7, 96, 2, 97, 7, 97, 2, 98, 7, 98, 2, 99, 7, 99, 2, 100, 7, 100, 2, 101, 7, 101, 2, 102, 7, 102, 2, 103, 7, 103, 2, 104, 7, 104, 2, 105, 7, 105, 2, 106, 7, 106, 2, 107, 7, 107, 2, 108, 7, 108, 2, 109, 7, 109, 2, 110, 7, 110, 2, 111, 7, 111, 2, 112, 7, 112, 2, 113, 7, 113, 2, 114, 7, 114, 2, 115, 7, 115, 2, 116, 7, 116, 2, 117, 7, 117, 2, 118, 7, 118, 2, 119, 7, 119, 2, 120, 7, 120, 2, 121, 7, 121, 2, 122, 7, 122, 2, 123, 7, 123, 2, 124, 7, 124, 2, 125, 7, 125, 2, 126, 7, 126, 2, 127, 7, 127, 2, 128, 7, 128, 2, 129, 7, 129, 2, 130, 7, 130, 2, 131, 7, 131, 2, 132, 7, 132, 2, 133, 7, 133, 2, 134, 7, 134, 2, 135, 7, 135, 2, 136, 7, 136, 2, 137, 7, 137, 2, 138, 7, 138, 2, 139, 7, 139, 2, 140, 7, 140, 2, 141, 7, 141, 2, 142, 7, 142, 2, 143, 7, 143, 2, 144, 7, 144, 2, 145, 7, 145, 2, 146, 7, 146, 2, 147, 7, 147, 2, 148, 7, 148, 2, 149, 7, 149, 2, 150, 7, 150, 2, 151, 7, 151, 2, 152, 7, 152, 2, 153, 7, 153, 2, 154, 7, 154, 2, 155, 7, 155, 2, 156, 7, 156, 2, 157, 7, 157, 2, 158, 7, 158, 2, 159, 7, 159, 2, 160, 7, 160, 2, 161, 7, 161, 2, 162, 7, 162, 2, 163, 7, 163, 2, 164, 7, 164, 2, 165, 7, 165, 2, 166, 7, 166, 2, 167, 7, 167, 2, 168, 7, 168, 2, 169, 7, 169, 2, 170, 7, 170, 2, 171, 7, 171, 2, 172, 7, 172, 2, 173, 7, 173, 2, 174, 7, 174, 2, 175, 7, 175, 2, 176, 7, 176, 2, 177, 7, 177, 2, 178, 7, 178, 2, 179, 7, 179, 2, 180, 7, 180, 2, 181, 7, 181, 2, 182, 7, 182, 2, 183, 7, 183, 2, 184, 7, 184, 2, 185, 7, 185, 2, 186, 7, 186, 2, 187, 7, 187, 2, 188, 7, 188, 2, 189, 7, 189, 2, 190, 7, 190, 2, 191, 7, 191, 2, 192, 7, 192, 2, 193, 7, 193, 2, 194, 7, 194, 2, 195, 7, 195, 2, 196, 7, 196, 2, 197, 7, 197, 2, 198, 7, 198, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 19, 4, 19, 580, 8, 19, 11, 19, 12, 19, 581, 
1, 19, 1, 19, 1, 20, 1, 20, 1, 20, 1, 20, 5, 20, 590, 8, 20, 10, 20, 12, 20, 593, 9, 20, 1, 20, 3, 20, 596, 8, 20, 1, 20, 3, 20, 599, 8, 20, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 5, 21, 608, 8, 21, 10, 21, 12, 21, 611, 9, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 22, 4, 22, 619, 8, 22, 11, 22, 12, 22, 620, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29, 3, 29, 642, 8, 29, 1, 29, 4, 29, 645, 8, 29, 11, 29, 12, 29, 646, 1, 30, 1, 30, 1, 31, 1, 31, 1, 32, 1, 32, 1, 32, 3, 32, 656, 8, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 3, 34, 663, 8, 34, 1, 35, 1, 35, 1, 35, 5, 35, 668, 8, 35, 10, 35, 12, 35, 671, 9, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 5, 35, 679, 8, 35, 10, 35, 12, 35, 682, 9, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 3, 35, 689, 8, 35, 1, 35, 3, 35, 692, 8, 35, 3, 35, 694, 8, 35, 1, 36, 4, 36, 697, 8, 36, 11, 36, 12, 36, 698, 1, 37, 4, 37, 702, 8, 37, 11, 37, 12, 37, 703, 1, 37, 1, 37, 5, 37, 708, 8, 37, 10, 37, 12, 37, 711, 9, 37, 1, 37, 1, 37, 4, 37, 715, 8, 37, 11, 37, 12, 37, 716, 1, 37, 4, 37, 720, 8, 37, 11, 37, 12, 37, 721, 1, 37, 1, 37, 5, 37, 726, 8, 37, 10, 37, 12, 37, 729, 9, 37, 3, 37, 731, 8, 37, 1, 37, 1, 37, 1, 37, 1, 37, 4, 37, 737, 8, 37, 11, 37, 12, 37, 738, 1, 37, 1, 37, 3, 37, 743, 8, 37, 1, 38, 1, 38, 1, 38, 1, 39, 1, 39, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 1, 41, 1, 41, 1, 42, 1, 42, 1, 42, 1, 43, 1, 43, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 45, 1, 45, 1, 46, 1, 46, 1, 46, 1, 46, 1, 46, 1, 46, 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 48, 1, 48, 1, 48, 1, 49, 1, 49, 1, 49, 1, 50, 1, 50, 1, 50, 1, 50, 1, 50, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 52, 1, 52, 1, 53, 1, 53, 1, 53, 1, 53, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 56, 1, 56, 1, 56, 1, 57, 1, 57, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 59, 1, 59, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 61, 1, 61, 1, 61, 1, 62, 1, 62, 1, 62, 1, 63, 1, 63, 1, 63, 1, 64, 1, 64, 1, 65, 1, 65, 1, 65, 1, 66, 1, 66, 1, 67, 1, 67, 1, 67, 1, 68, 1, 68, 1, 69, 1, 69, 1, 70, 1, 70, 1, 71, 1, 71, 1, 72, 1, 72, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 74, 1, 74, 1, 74, 1, 74, 1, 75, 1, 75, 1, 75, 3, 75, 874, 8, 75, 1, 75, 5, 75, 877, 8, 75, 10, 75, 12, 75, 880, 9, 75, 1, 75, 1, 75, 4, 75, 884, 8, 75, 11, 75, 12, 75, 885, 3, 75, 888, 8, 75, 1, 76, 1, 76, 1, 76, 1, 76, 1, 76, 1, 77, 1, 77, 1, 77, 1, 77, 1, 77, 1, 78, 1, 78, 5, 78, 902, 8, 78, 10, 78, 12, 78, 905, 9, 78, 1, 78, 1, 78, 3, 78, 909, 8, 78, 1, 78, 4, 78, 912, 8, 78, 11, 78, 12, 78, 913, 3, 78, 916, 8, 78, 1, 79, 1, 79, 4, 79, 920, 8, 79, 11, 79, 12, 79, 921, 1, 79, 1, 79, 1, 80, 1, 80, 1, 81, 1, 81, 1, 81, 1, 81, 1, 82, 1, 82, 1, 82, 1, 82, 1, 83, 1, 83, 1, 83, 1, 83, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 86, 1, 86, 1, 86, 1, 86, 1, 87, 1, 87, 1, 87, 1, 87, 1, 88, 1, 88, 1, 88, 1, 88, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 90, 1, 90, 1, 90, 1, 90, 1, 91, 1, 91, 1, 91, 1, 91, 1, 92, 1, 92, 1, 92, 1, 92, 1, 93, 1, 93, 1, 93, 1, 93, 1, 94, 1, 94, 1, 94, 1, 94, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 96, 1, 96, 1, 96, 3, 96, 999, 8, 96, 1, 97, 4, 97, 1002, 8, 97, 11, 97, 12, 97, 1003, 1, 98, 1, 98, 1, 98, 1, 98, 1, 99, 1, 99, 1, 99, 1, 99, 1, 100, 1, 100, 1, 100, 1, 100, 1, 101, 1, 101, 1, 101, 1, 101, 1, 102, 1, 102, 1, 102, 1, 102, 1, 103, 1, 103, 1, 103, 1, 103, 1, 103, 1, 104, 1, 104, 1, 104, 1, 104, 1, 105, 1, 105, 1, 105, 1, 105, 1, 106, 1, 106, 1, 106, 1, 106, 1, 106, 
1, 107, 1, 107, 1, 107, 1, 107, 1, 107, 1, 108, 1, 108, 1, 108, 1, 108, 3, 108, 1053, 8, 108, 1, 109, 1, 109, 3, 109, 1057, 8, 109, 1, 109, 5, 109, 1060, 8, 109, 10, 109, 12, 109, 1063, 9, 109, 1, 109, 1, 109, 3, 109, 1067, 8, 109, 1, 109, 4, 109, 1070, 8, 109, 11, 109, 12, 109, 1071, 3, 109, 1074, 8, 109, 1, 110, 1, 110, 4, 110, 1078, 8, 110, 11, 110, 12, 110, 1079, 1, 111, 1, 111, 1, 111, 1, 111, 1, 112, 1, 112, 1, 112, 1, 112, 1, 113, 1, 113, 1, 113, 1, 113, 1, 114, 1, 114, 1, 114, 1, 114, 1, 114, 1, 115, 1, 115, 1, 115, 1, 115, 1, 116, 1, 116, 1, 116, 1, 116, 1, 117, 1, 117, 1, 117, 1, 117, 1, 118, 1, 118, 1, 118, 1, 118, 1, 118, 1, 119, 1, 119, 1, 119, 1, 119, 1, 119, 1, 120, 1, 120, 1, 120, 1, 121, 1, 121, 1, 121, 1, 121, 1, 122, 1, 122, 1, 122, 1, 122, 1, 123, 1, 123, 1, 123, 1, 123, 1, 124, 1, 124, 1, 124, 1, 124, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, 126, 1, 126, 1, 126, 1, 126, 1, 126, 1, 127, 1, 127, 1, 127, 1, 127, 1, 127, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, 129, 1, 129, 1, 130, 4, 130, 1165, 8, 130, 11, 130, 12, 130, 1166, 1, 130, 1, 130, 3, 130, 1171, 8, 130, 1, 130, 4, 130, 1174, 8, 130, 11, 130, 12, 130, 1175, 1, 131, 1, 131, 1, 131, 1, 131, 1, 132, 1, 132, 1, 132, 1, 132, 1, 133, 1, 133, 1, 133, 1, 133, 1, 134, 1, 134, 1, 134, 1, 134, 1, 135, 1, 135, 1, 135, 1, 135, 1, 135, 1, 135, 1, 136, 1, 136, 1, 136, 1, 136, 1, 137, 1, 137, 1, 137, 1, 137, 1, 138, 1, 138, 1, 138, 1, 138, 1, 139, 1, 139, 1, 139, 1, 139, 1, 140, 1, 140, 1, 140, 1, 140, 1, 141, 1, 141, 1, 141, 1, 141, 1, 142, 1, 142, 1, 142, 1, 142, 1, 142, 1, 143, 1, 143, 1, 143, 1, 143, 1, 143, 1, 144, 1, 144, 1, 144, 1, 144, 1, 145, 1, 145, 1, 145, 1, 145, 1, 146, 1, 146, 1, 146, 1, 146, 1, 147, 1, 147, 1, 147, 1, 147, 1, 147, 1, 148, 1, 148, 1, 148, 1, 148, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 150, 1, 150, 1, 150, 1, 150, 1, 150, 1, 151, 1, 151, 1, 151, 1, 151, 1, 152, 1, 152, 1, 152, 1, 152, 1, 153, 1, 153, 1, 153, 1, 153, 1, 154, 1, 154, 1, 154, 1, 154, 1, 155, 1, 155, 1, 155, 1, 155, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, 158, 1, 158, 1, 158, 1, 158, 1, 159, 1, 159, 1, 159, 1, 159, 1, 160, 1, 160, 1, 160, 1, 160, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 162, 1, 162, 1, 162, 1, 162, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 4, 163, 1321, 8, 163, 11, 163, 12, 163, 1322, 1, 164, 1, 164, 1, 164, 1, 164, 1, 165, 1, 165, 1, 165, 1, 165, 1, 166, 1, 166, 1, 166, 1, 166, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 168, 1, 168, 1, 168, 1, 168, 1, 169, 1, 169, 1, 169, 1, 169, 1, 170, 1, 170, 1, 170, 1, 170, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 172, 1, 172, 1, 172, 1, 172, 1, 173, 1, 173, 1, 173, 1, 173, 1, 174, 1, 174, 1, 174, 1, 174, 1, 175, 1, 175, 1, 175, 1, 175, 1, 176, 1, 176, 1, 176, 1, 176, 1, 177, 1, 177, 1, 177, 1, 177, 1, 177, 1, 177, 1, 178, 1, 178, 1, 178, 1, 178, 1, 179, 1, 179, 1, 179, 1, 179, 1, 180, 1, 180, 1, 180, 1, 180, 1, 181, 1, 181, 1, 181, 1, 181, 1, 182, 1, 182, 1, 182, 1, 182, 1, 183, 1, 183, 1, 183, 1, 183, 1, 184, 1, 184, 1, 184, 1, 184, 1, 184, 1, 185, 1, 185, 1, 185, 1, 185, 1, 185, 1, 185, 1, 186, 1, 186, 1, 186, 1, 186, 1, 186, 1, 186, 1, 187, 1, 187, 1, 187, 1, 187, 1, 188, 1, 188, 1, 188, 1, 188, 1, 189, 1, 189, 1, 189, 1, 189, 1, 190, 1, 190, 1, 190, 1, 190, 1, 190, 1, 190, 1, 191, 1, 191, 1, 191, 1, 191, 1, 191, 1, 191, 1, 192, 1, 192, 1, 192, 1, 192, 1, 193, 1, 193, 1, 193, 1, 193, 1, 194, 1, 194, 1, 194, 1, 194, 1, 195, 1, 195, 1, 195, 1, 195, 1, 195, 1, 195, 1, 196, 1, 196, 1, 196, 1, 196, 1, 196, 1, 196, 
1, 197, 1, 197, 1, 197, 1, 197, 1, 197, 1, 197, 1, 198, 1, 198, 1, 198, 1, 198, 1, 198, 2, 609, 680, 0, 199, 15, 1, 17, 2, 19, 3, 21, 4, 23, 5, 25, 6, 27, 7, 29, 8, 31, 9, 33, 10, 35, 11, 37, 12, 39, 13, 41, 14, 43, 15, 45, 16, 47, 17, 49, 18, 51, 19, 53, 20, 55, 21, 57, 22, 59, 23, 61, 24, 63, 25, 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 0, 81, 0, 83, 0, 85, 26, 87, 27, 89, 28, 91, 29, 93, 30, 95, 31, 97, 32, 99, 33, 101, 34, 103, 35, 105, 36, 107, 37, 109, 38, 111, 39, 113, 40, 115, 41, 117, 42, 119, 43, 121, 44, 123, 45, 125, 46, 127, 47, 129, 48, 131, 49, 133, 50, 135, 51, 137, 52, 139, 53, 141, 54, 143, 55, 145, 56, 147, 57, 149, 58, 151, 59, 153, 60, 155, 61, 157, 62, 159, 63, 161, 0, 163, 0, 165, 64, 167, 65, 169, 66, 171, 67, 173, 0, 175, 68, 177, 69, 179, 70, 181, 71, 183, 0, 185, 0, 187, 72, 189, 73, 191, 74, 193, 0, 195, 0, 197, 0, 199, 0, 201, 0, 203, 0, 205, 75, 207, 0, 209, 76, 211, 0, 213, 0, 215, 77, 217, 78, 219, 79, 221, 0, 223, 0, 225, 0, 227, 0, 229, 0, 231, 0, 233, 0, 235, 80, 237, 81, 239, 82, 241, 83, 243, 0, 245, 0, 247, 0, 249, 0, 251, 0, 253, 0, 255, 84, 257, 0, 259, 85, 261, 86, 263, 87, 265, 0, 267, 0, 269, 88, 271, 89, 273, 0, 275, 90, 277, 0, 279, 91, 281, 92, 283, 93, 285, 0, 287, 0, 289, 0, 291, 0, 293, 0, 295, 0, 297, 0, 299, 0, 301, 0, 303, 94, 305, 95, 307, 96, 309, 0, 311, 0, 313, 0, 315, 0, 317, 0, 319, 0, 321, 97, 323, 98, 325, 99, 327, 0, 329, 100, 331, 101, 333, 102, 335, 103, 337, 0, 339, 0, 341, 104, 343, 105, 345, 106, 347, 107, 349, 0, 351, 0, 353, 0, 355, 0, 357, 0, 359, 0, 361, 0, 363, 108, 365, 109, 367, 110, 369, 0, 371, 0, 373, 0, 375, 0, 377, 111, 379, 112, 381, 113, 383, 0, 385, 0, 387, 0, 389, 114, 391, 115, 393, 116, 395, 0, 397, 0, 399, 117, 401, 118, 403, 119, 405, 0, 407, 0, 409, 0, 411, 0, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 35, 2, 0, 68, 68, 100, 100, 2, 0, 73, 73, 105, 105, 2, 0, 83, 83, 115, 115, 2, 0, 69, 69, 101, 101, 2, 0, 67, 67, 99, 99, 2, 0, 84, 84, 116, 116, 2, 0, 82, 82, 114, 114, 2, 0, 79, 79, 111, 111, 2, 0, 80, 80, 112, 112, 2, 0, 78, 78, 110, 110, 2, 0, 72, 72, 104, 104, 2, 0, 86, 86, 118, 118, 2, 0, 65, 65, 97, 97, 2, 0, 76, 76, 108, 108, 2, 0, 88, 88, 120, 120, 2, 0, 70, 70, 102, 102, 2, 0, 77, 77, 109, 109, 2, 0, 71, 71, 103, 103, 2, 0, 75, 75, 107, 107, 2, 0, 87, 87, 119, 119, 2, 0, 85, 85, 117, 117, 6, 0, 9, 10, 13, 13, 32, 32, 47, 47, 91, 91, 93, 93, 2, 0, 10, 10, 13, 13, 3, 0, 9, 10, 13, 13, 32, 32, 1, 0, 48, 57, 2, 0, 65, 90, 97, 122, 8, 0, 34, 34, 78, 78, 82, 82, 84, 84, 92, 92, 110, 110, 114, 114, 116, 116, 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 2, 0, 43, 43, 45, 45, 1, 0, 96, 96, 2, 0, 66, 66, 98, 98, 2, 0, 89, 89, 121, 121, 11, 0, 9, 10, 13, 13, 32, 32, 34, 34, 44, 44, 47, 47, 58, 58, 61, 61, 91, 91, 93, 93, 124, 124, 2, 0, 42, 42, 47, 47, 11, 0, 9, 10, 13, 13, 32, 32, 34, 35, 44, 44, 47, 47, 58, 58, 60, 60, 62, 63, 92, 92, 124, 124, 1512, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 57, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 61, 1, 0, 0, 0, 1, 63, 1, 0, 0, 0, 1, 85, 1, 0, 0, 0, 1, 87, 1, 0, 0, 0, 1, 89, 1, 0, 0, 0, 1, 91, 1, 0, 0, 0, 1, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0, 1, 97, 1, 0, 0, 0, 1, 99, 1, 0, 0, 0, 1, 101, 1, 0, 0, 0, 1, 
103, 1, 0, 0, 0, 1, 105, 1, 0, 0, 0, 1, 107, 1, 0, 0, 0, 1, 109, 1, 0, 0, 0, 1, 111, 1, 0, 0, 0, 1, 113, 1, 0, 0, 0, 1, 115, 1, 0, 0, 0, 1, 117, 1, 0, 0, 0, 1, 119, 1, 0, 0, 0, 1, 121, 1, 0, 0, 0, 1, 123, 1, 0, 0, 0, 1, 125, 1, 0, 0, 0, 1, 127, 1, 0, 0, 0, 1, 129, 1, 0, 0, 0, 1, 131, 1, 0, 0, 0, 1, 133, 1, 0, 0, 0, 1, 135, 1, 0, 0, 0, 1, 137, 1, 0, 0, 0, 1, 139, 1, 0, 0, 0, 1, 141, 1, 0, 0, 0, 1, 143, 1, 0, 0, 0, 1, 145, 1, 0, 0, 0, 1, 147, 1, 0, 0, 0, 1, 149, 1, 0, 0, 0, 1, 151, 1, 0, 0, 0, 1, 153, 1, 0, 0, 0, 1, 155, 1, 0, 0, 0, 1, 157, 1, 0, 0, 0, 1, 159, 1, 0, 0, 0, 1, 161, 1, 0, 0, 0, 1, 163, 1, 0, 0, 0, 1, 165, 1, 0, 0, 0, 1, 167, 1, 0, 0, 0, 1, 169, 1, 0, 0, 0, 1, 171, 1, 0, 0, 0, 1, 175, 1, 0, 0, 0, 1, 177, 1, 0, 0, 0, 1, 179, 1, 0, 0, 0, 1, 181, 1, 0, 0, 0, 2, 183, 1, 0, 0, 0, 2, 185, 1, 0, 0, 0, 2, 187, 1, 0, 0, 0, 2, 189, 1, 0, 0, 0, 2, 191, 1, 0, 0, 0, 3, 193, 1, 0, 0, 0, 3, 195, 1, 0, 0, 0, 3, 197, 1, 0, 0, 0, 3, 199, 1, 0, 0, 0, 3, 201, 1, 0, 0, 0, 3, 203, 1, 0, 0, 0, 3, 205, 1, 0, 0, 0, 3, 209, 1, 0, 0, 0, 3, 211, 1, 0, 0, 0, 3, 213, 1, 0, 0, 0, 3, 215, 1, 0, 0, 0, 3, 217, 1, 0, 0, 0, 3, 219, 1, 0, 0, 0, 4, 221, 1, 0, 0, 0, 4, 223, 1, 0, 0, 0, 4, 225, 1, 0, 0, 0, 4, 227, 1, 0, 0, 0, 4, 229, 1, 0, 0, 0, 4, 235, 1, 0, 0, 0, 4, 237, 1, 0, 0, 0, 4, 239, 1, 0, 0, 0, 4, 241, 1, 0, 0, 0, 5, 243, 1, 0, 0, 0, 5, 245, 1, 0, 0, 0, 5, 247, 1, 0, 0, 0, 5, 249, 1, 0, 0, 0, 5, 251, 1, 0, 0, 0, 5, 253, 1, 0, 0, 0, 5, 255, 1, 0, 0, 0, 5, 257, 1, 0, 0, 0, 5, 259, 1, 0, 0, 0, 5, 261, 1, 0, 0, 0, 5, 263, 1, 0, 0, 0, 6, 265, 1, 0, 0, 0, 6, 267, 1, 0, 0, 0, 6, 269, 1, 0, 0, 0, 6, 271, 1, 0, 0, 0, 6, 275, 1, 0, 0, 0, 6, 277, 1, 0, 0, 0, 6, 279, 1, 0, 0, 0, 6, 281, 1, 0, 0, 0, 6, 283, 1, 0, 0, 0, 7, 285, 1, 0, 0, 0, 7, 287, 1, 0, 0, 0, 7, 289, 1, 0, 0, 0, 7, 291, 1, 0, 0, 0, 7, 293, 1, 0, 0, 0, 7, 295, 1, 0, 0, 0, 7, 297, 1, 0, 0, 0, 7, 299, 1, 0, 0, 0, 7, 301, 1, 0, 0, 0, 7, 303, 1, 0, 0, 0, 7, 305, 1, 0, 0, 0, 7, 307, 1, 0, 0, 0, 8, 309, 1, 0, 0, 0, 8, 311, 1, 0, 0, 0, 8, 313, 1, 0, 0, 0, 8, 315, 1, 0, 0, 0, 8, 317, 1, 0, 0, 0, 8, 319, 1, 0, 0, 0, 8, 321, 1, 0, 0, 0, 8, 323, 1, 0, 0, 0, 8, 325, 1, 0, 0, 0, 9, 327, 1, 0, 0, 0, 9, 329, 1, 0, 0, 0, 9, 331, 1, 0, 0, 0, 9, 333, 1, 0, 0, 0, 9, 335, 1, 0, 0, 0, 10, 337, 1, 0, 0, 0, 10, 339, 1, 0, 0, 0, 10, 341, 1, 0, 0, 0, 10, 343, 1, 0, 0, 0, 10, 345, 1, 0, 0, 0, 10, 347, 1, 0, 0, 0, 11, 349, 1, 0, 0, 0, 11, 351, 1, 0, 0, 0, 11, 353, 1, 0, 0, 0, 11, 355, 1, 0, 0, 0, 11, 357, 1, 0, 0, 0, 11, 359, 1, 0, 0, 0, 11, 361, 1, 0, 0, 0, 11, 363, 1, 0, 0, 0, 11, 365, 1, 0, 0, 0, 11, 367, 1, 0, 0, 0, 12, 369, 1, 0, 0, 0, 12, 371, 1, 0, 0, 0, 12, 373, 1, 0, 0, 0, 12, 375, 1, 0, 0, 0, 12, 377, 1, 0, 0, 0, 12, 379, 1, 0, 0, 0, 12, 381, 1, 0, 0, 0, 13, 383, 1, 0, 0, 0, 13, 385, 1, 0, 0, 0, 13, 387, 1, 0, 0, 0, 13, 389, 1, 0, 0, 0, 13, 391, 1, 0, 0, 0, 13, 393, 1, 0, 0, 0, 14, 395, 1, 0, 0, 0, 14, 397, 1, 0, 0, 0, 14, 399, 1, 0, 0, 0, 14, 401, 1, 0, 0, 0, 14, 403, 1, 0, 0, 0, 14, 405, 1, 0, 0, 0, 14, 407, 1, 0, 0, 0, 14, 409, 1, 0, 0, 0, 14, 411, 1, 0, 0, 0, 15, 413, 1, 0, 0, 0, 17, 423, 1, 0, 0, 0, 19, 430, 1, 0, 0, 0, 21, 439, 1, 0, 0, 0, 23, 446, 1, 0, 0, 0, 25, 456, 1, 0, 0, 0, 27, 463, 1, 0, 0, 0, 29, 470, 1, 0, 0, 0, 31, 477, 1, 0, 0, 0, 33, 485, 1, 0, 0, 0, 35, 497, 1, 0, 0, 0, 37, 506, 1, 0, 0, 0, 39, 512, 1, 0, 0, 0, 41, 519, 1, 0, 0, 0, 43, 526, 1, 0, 0, 0, 45, 534, 1, 0, 0, 0, 47, 542, 1, 0, 0, 0, 49, 557, 1, 0, 0, 0, 51, 567, 1, 0, 0, 0, 53, 579, 1, 0, 0, 0, 55, 585, 1, 0, 0, 0, 57, 602, 1, 0, 0, 0, 59, 618, 1, 0, 0, 0, 61, 624, 1, 0, 0, 0, 63, 626, 1, 0, 
0, 0, 65, 630, 1, 0, 0, 0, 67, 632, 1, 0, 0, 0, 69, 634, 1, 0, 0, 0, 71, 637, 1, 0, 0, 0, 73, 639, 1, 0, 0, 0, 75, 648, 1, 0, 0, 0, 77, 650, 1, 0, 0, 0, 79, 655, 1, 0, 0, 0, 81, 657, 1, 0, 0, 0, 83, 662, 1, 0, 0, 0, 85, 693, 1, 0, 0, 0, 87, 696, 1, 0, 0, 0, 89, 742, 1, 0, 0, 0, 91, 744, 1, 0, 0, 0, 93, 747, 1, 0, 0, 0, 95, 751, 1, 0, 0, 0, 97, 755, 1, 0, 0, 0, 99, 757, 1, 0, 0, 0, 101, 760, 1, 0, 0, 0, 103, 762, 1, 0, 0, 0, 105, 767, 1, 0, 0, 0, 107, 769, 1, 0, 0, 0, 109, 775, 1, 0, 0, 0, 111, 781, 1, 0, 0, 0, 113, 784, 1, 0, 0, 0, 115, 787, 1, 0, 0, 0, 117, 792, 1, 0, 0, 0, 119, 797, 1, 0, 0, 0, 121, 799, 1, 0, 0, 0, 123, 803, 1, 0, 0, 0, 125, 808, 1, 0, 0, 0, 127, 814, 1, 0, 0, 0, 129, 817, 1, 0, 0, 0, 131, 819, 1, 0, 0, 0, 133, 825, 1, 0, 0, 0, 135, 827, 1, 0, 0, 0, 137, 832, 1, 0, 0, 0, 139, 835, 1, 0, 0, 0, 141, 838, 1, 0, 0, 0, 143, 841, 1, 0, 0, 0, 145, 843, 1, 0, 0, 0, 147, 846, 1, 0, 0, 0, 149, 848, 1, 0, 0, 0, 151, 851, 1, 0, 0, 0, 153, 853, 1, 0, 0, 0, 155, 855, 1, 0, 0, 0, 157, 857, 1, 0, 0, 0, 159, 859, 1, 0, 0, 0, 161, 861, 1, 0, 0, 0, 163, 866, 1, 0, 0, 0, 165, 887, 1, 0, 0, 0, 167, 889, 1, 0, 0, 0, 169, 894, 1, 0, 0, 0, 171, 915, 1, 0, 0, 0, 173, 917, 1, 0, 0, 0, 175, 925, 1, 0, 0, 0, 177, 927, 1, 0, 0, 0, 179, 931, 1, 0, 0, 0, 181, 935, 1, 0, 0, 0, 183, 939, 1, 0, 0, 0, 185, 944, 1, 0, 0, 0, 187, 949, 1, 0, 0, 0, 189, 953, 1, 0, 0, 0, 191, 957, 1, 0, 0, 0, 193, 961, 1, 0, 0, 0, 195, 966, 1, 0, 0, 0, 197, 970, 1, 0, 0, 0, 199, 974, 1, 0, 0, 0, 201, 978, 1, 0, 0, 0, 203, 982, 1, 0, 0, 0, 205, 986, 1, 0, 0, 0, 207, 998, 1, 0, 0, 0, 209, 1001, 1, 0, 0, 0, 211, 1005, 1, 0, 0, 0, 213, 1009, 1, 0, 0, 0, 215, 1013, 1, 0, 0, 0, 217, 1017, 1, 0, 0, 0, 219, 1021, 1, 0, 0, 0, 221, 1025, 1, 0, 0, 0, 223, 1030, 1, 0, 0, 0, 225, 1034, 1, 0, 0, 0, 227, 1038, 1, 0, 0, 0, 229, 1043, 1, 0, 0, 0, 231, 1052, 1, 0, 0, 0, 233, 1073, 1, 0, 0, 0, 235, 1077, 1, 0, 0, 0, 237, 1081, 1, 0, 0, 0, 239, 1085, 1, 0, 0, 0, 241, 1089, 1, 0, 0, 0, 243, 1093, 1, 0, 0, 0, 245, 1098, 1, 0, 0, 0, 247, 1102, 1, 0, 0, 0, 249, 1106, 1, 0, 0, 0, 251, 1110, 1, 0, 0, 0, 253, 1115, 1, 0, 0, 0, 255, 1120, 1, 0, 0, 0, 257, 1123, 1, 0, 0, 0, 259, 1127, 1, 0, 0, 0, 261, 1131, 1, 0, 0, 0, 263, 1135, 1, 0, 0, 0, 265, 1139, 1, 0, 0, 0, 267, 1144, 1, 0, 0, 0, 269, 1149, 1, 0, 0, 0, 271, 1154, 1, 0, 0, 0, 273, 1161, 1, 0, 0, 0, 275, 1170, 1, 0, 0, 0, 277, 1177, 1, 0, 0, 0, 279, 1181, 1, 0, 0, 0, 281, 1185, 1, 0, 0, 0, 283, 1189, 1, 0, 0, 0, 285, 1193, 1, 0, 0, 0, 287, 1199, 1, 0, 0, 0, 289, 1203, 1, 0, 0, 0, 291, 1207, 1, 0, 0, 0, 293, 1211, 1, 0, 0, 0, 295, 1215, 1, 0, 0, 0, 297, 1219, 1, 0, 0, 0, 299, 1223, 1, 0, 0, 0, 301, 1228, 1, 0, 0, 0, 303, 1233, 1, 0, 0, 0, 305, 1237, 1, 0, 0, 0, 307, 1241, 1, 0, 0, 0, 309, 1245, 1, 0, 0, 0, 311, 1250, 1, 0, 0, 0, 313, 1254, 1, 0, 0, 0, 315, 1259, 1, 0, 0, 0, 317, 1264, 1, 0, 0, 0, 319, 1268, 1, 0, 0, 0, 321, 1272, 1, 0, 0, 0, 323, 1276, 1, 0, 0, 0, 325, 1280, 1, 0, 0, 0, 327, 1284, 1, 0, 0, 0, 329, 1289, 1, 0, 0, 0, 331, 1294, 1, 0, 0, 0, 333, 1298, 1, 0, 0, 0, 335, 1302, 1, 0, 0, 0, 337, 1306, 1, 0, 0, 0, 339, 1311, 1, 0, 0, 0, 341, 1320, 1, 0, 0, 0, 343, 1324, 1, 0, 0, 0, 345, 1328, 1, 0, 0, 0, 347, 1332, 1, 0, 0, 0, 349, 1336, 1, 0, 0, 0, 351, 1341, 1, 0, 0, 0, 353, 1345, 1, 0, 0, 0, 355, 1349, 1, 0, 0, 0, 357, 1353, 1, 0, 0, 0, 359, 1358, 1, 0, 0, 0, 361, 1362, 1, 0, 0, 0, 363, 1366, 1, 0, 0, 0, 365, 1370, 1, 0, 0, 0, 367, 1374, 1, 0, 0, 0, 369, 1378, 1, 0, 0, 0, 371, 1384, 1, 0, 0, 0, 373, 1388, 1, 0, 0, 0, 375, 1392, 1, 0, 0, 0, 377, 1396, 1, 0, 0, 0, 379, 1400, 1, 0, 0, 0, 381, 
1404, 1, 0, 0, 0, 383, 1408, 1, 0, 0, 0, 385, 1413, 1, 0, 0, 0, 387, 1419, 1, 0, 0, 0, 389, 1425, 1, 0, 0, 0, 391, 1429, 1, 0, 0, 0, 393, 1433, 1, 0, 0, 0, 395, 1437, 1, 0, 0, 0, 397, 1443, 1, 0, 0, 0, 399, 1449, 1, 0, 0, 0, 401, 1453, 1, 0, 0, 0, 403, 1457, 1, 0, 0, 0, 405, 1461, 1, 0, 0, 0, 407, 1467, 1, 0, 0, 0, 409, 1473, 1, 0, 0, 0, 411, 1479, 1, 0, 0, 0, 413, 414, 7, 0, 0, 0, 414, 415, 7, 1, 0, 0, 415, 416, 7, 2, 0, 0, 416, 417, 7, 2, 0, 0, 417, 418, 7, 3, 0, 0, 418, 419, 7, 4, 0, 0, 419, 420, 7, 5, 0, 0, 420, 421, 1, 0, 0, 0, 421, 422, 6, 0, 0, 0, 422, 16, 1, 0, 0, 0, 423, 424, 7, 0, 0, 0, 424, 425, 7, 6, 0, 0, 425, 426, 7, 7, 0, 0, 426, 427, 7, 8, 0, 0, 427, 428, 1, 0, 0, 0, 428, 429, 6, 1, 1, 0, 429, 18, 1, 0, 0, 0, 430, 431, 7, 3, 0, 0, 431, 432, 7, 9, 0, 0, 432, 433, 7, 6, 0, 0, 433, 434, 7, 1, 0, 0, 434, 435, 7, 4, 0, 0, 435, 436, 7, 10, 0, 0, 436, 437, 1, 0, 0, 0, 437, 438, 6, 2, 2, 0, 438, 20, 1, 0, 0, 0, 439, 440, 7, 3, 0, 0, 440, 441, 7, 11, 0, 0, 441, 442, 7, 12, 0, 0, 442, 443, 7, 13, 0, 0, 443, 444, 1, 0, 0, 0, 444, 445, 6, 3, 0, 0, 445, 22, 1, 0, 0, 0, 446, 447, 7, 3, 0, 0, 447, 448, 7, 14, 0, 0, 448, 449, 7, 8, 0, 0, 449, 450, 7, 13, 0, 0, 450, 451, 7, 12, 0, 0, 451, 452, 7, 1, 0, 0, 452, 453, 7, 9, 0, 0, 453, 454, 1, 0, 0, 0, 454, 455, 6, 4, 3, 0, 455, 24, 1, 0, 0, 0, 456, 457, 7, 15, 0, 0, 457, 458, 7, 6, 0, 0, 458, 459, 7, 7, 0, 0, 459, 460, 7, 16, 0, 0, 460, 461, 1, 0, 0, 0, 461, 462, 6, 5, 4, 0, 462, 26, 1, 0, 0, 0, 463, 464, 7, 17, 0, 0, 464, 465, 7, 6, 0, 0, 465, 466, 7, 7, 0, 0, 466, 467, 7, 18, 0, 0, 467, 468, 1, 0, 0, 0, 468, 469, 6, 6, 0, 0, 469, 28, 1, 0, 0, 0, 470, 471, 7, 18, 0, 0, 471, 472, 7, 3, 0, 0, 472, 473, 7, 3, 0, 0, 473, 474, 7, 8, 0, 0, 474, 475, 1, 0, 0, 0, 475, 476, 6, 7, 1, 0, 476, 30, 1, 0, 0, 0, 477, 478, 7, 13, 0, 0, 478, 479, 7, 1, 0, 0, 479, 480, 7, 16, 0, 0, 480, 481, 7, 1, 0, 0, 481, 482, 7, 5, 0, 0, 482, 483, 1, 0, 0, 0, 483, 484, 6, 8, 0, 0, 484, 32, 1, 0, 0, 0, 485, 486, 7, 16, 0, 0, 486, 487, 7, 11, 0, 0, 487, 488, 5, 95, 0, 0, 488, 489, 7, 3, 0, 0, 489, 490, 7, 14, 0, 0, 490, 491, 7, 8, 0, 0, 491, 492, 7, 12, 0, 0, 492, 493, 7, 9, 0, 0, 493, 494, 7, 0, 0, 0, 494, 495, 1, 0, 0, 0, 495, 496, 6, 9, 5, 0, 496, 34, 1, 0, 0, 0, 497, 498, 7, 6, 0, 0, 498, 499, 7, 3, 0, 0, 499, 500, 7, 9, 0, 0, 500, 501, 7, 12, 0, 0, 501, 502, 7, 16, 0, 0, 502, 503, 7, 3, 0, 0, 503, 504, 1, 0, 0, 0, 504, 505, 6, 10, 6, 0, 505, 36, 1, 0, 0, 0, 506, 507, 7, 6, 0, 0, 507, 508, 7, 7, 0, 0, 508, 509, 7, 19, 0, 0, 509, 510, 1, 0, 0, 0, 510, 511, 6, 11, 0, 0, 511, 38, 1, 0, 0, 0, 512, 513, 7, 2, 0, 0, 513, 514, 7, 10, 0, 0, 514, 515, 7, 7, 0, 0, 515, 516, 7, 19, 0, 0, 516, 517, 1, 0, 0, 0, 517, 518, 6, 12, 7, 0, 518, 40, 1, 0, 0, 0, 519, 520, 7, 2, 0, 0, 520, 521, 7, 7, 0, 0, 521, 522, 7, 6, 0, 0, 522, 523, 7, 5, 0, 0, 523, 524, 1, 0, 0, 0, 524, 525, 6, 13, 0, 0, 525, 42, 1, 0, 0, 0, 526, 527, 7, 2, 0, 0, 527, 528, 7, 5, 0, 0, 528, 529, 7, 12, 0, 0, 529, 530, 7, 5, 0, 0, 530, 531, 7, 2, 0, 0, 531, 532, 1, 0, 0, 0, 532, 533, 6, 14, 0, 0, 533, 44, 1, 0, 0, 0, 534, 535, 7, 19, 0, 0, 535, 536, 7, 10, 0, 0, 536, 537, 7, 3, 0, 0, 537, 538, 7, 6, 0, 0, 538, 539, 7, 3, 0, 0, 539, 540, 1, 0, 0, 0, 540, 541, 6, 15, 0, 0, 541, 46, 1, 0, 0, 0, 542, 543, 4, 16, 0, 0, 543, 544, 7, 1, 0, 0, 544, 545, 7, 9, 0, 0, 545, 546, 7, 13, 0, 0, 546, 547, 7, 1, 0, 0, 547, 548, 7, 9, 0, 0, 548, 549, 7, 3, 0, 0, 549, 550, 7, 2, 0, 0, 550, 551, 7, 5, 0, 0, 551, 552, 7, 12, 0, 0, 552, 553, 7, 5, 0, 0, 553, 554, 7, 2, 0, 0, 554, 555, 1, 0, 0, 0, 555, 556, 6, 16, 0, 0, 556, 48, 1, 0, 0, 0, 
557, 558, 4, 17, 1, 0, 558, 559, 7, 13, 0, 0, 559, 560, 7, 7, 0, 0, 560, 561, 7, 7, 0, 0, 561, 562, 7, 18, 0, 0, 562, 563, 7, 20, 0, 0, 563, 564, 7, 8, 0, 0, 564, 565, 1, 0, 0, 0, 565, 566, 6, 17, 8, 0, 566, 50, 1, 0, 0, 0, 567, 568, 4, 18, 2, 0, 568, 569, 7, 16, 0, 0, 569, 570, 7, 3, 0, 0, 570, 571, 7, 5, 0, 0, 571, 572, 7, 6, 0, 0, 572, 573, 7, 1, 0, 0, 573, 574, 7, 4, 0, 0, 574, 575, 7, 2, 0, 0, 575, 576, 1, 0, 0, 0, 576, 577, 6, 18, 9, 0, 577, 52, 1, 0, 0, 0, 578, 580, 8, 21, 0, 0, 579, 578, 1, 0, 0, 0, 580, 581, 1, 0, 0, 0, 581, 579, 1, 0, 0, 0, 581, 582, 1, 0, 0, 0, 582, 583, 1, 0, 0, 0, 583, 584, 6, 19, 0, 0, 584, 54, 1, 0, 0, 0, 585, 586, 5, 47, 0, 0, 586, 587, 5, 47, 0, 0, 587, 591, 1, 0, 0, 0, 588, 590, 8, 22, 0, 0, 589, 588, 1, 0, 0, 0, 590, 593, 1, 0, 0, 0, 591, 589, 1, 0, 0, 0, 591, 592, 1, 0, 0, 0, 592, 595, 1, 0, 0, 0, 593, 591, 1, 0, 0, 0, 594, 596, 5, 13, 0, 0, 595, 594, 1, 0, 0, 0, 595, 596, 1, 0, 0, 0, 596, 598, 1, 0, 0, 0, 597, 599, 5, 10, 0, 0, 598, 597, 1, 0, 0, 0, 598, 599, 1, 0, 0, 0, 599, 600, 1, 0, 0, 0, 600, 601, 6, 20, 10, 0, 601, 56, 1, 0, 0, 0, 602, 603, 5, 47, 0, 0, 603, 604, 5, 42, 0, 0, 604, 609, 1, 0, 0, 0, 605, 608, 3, 57, 21, 0, 606, 608, 9, 0, 0, 0, 607, 605, 1, 0, 0, 0, 607, 606, 1, 0, 0, 0, 608, 611, 1, 0, 0, 0, 609, 610, 1, 0, 0, 0, 609, 607, 1, 0, 0, 0, 610, 612, 1, 0, 0, 0, 611, 609, 1, 0, 0, 0, 612, 613, 5, 42, 0, 0, 613, 614, 5, 47, 0, 0, 614, 615, 1, 0, 0, 0, 615, 616, 6, 21, 10, 0, 616, 58, 1, 0, 0, 0, 617, 619, 7, 23, 0, 0, 618, 617, 1, 0, 0, 0, 619, 620, 1, 0, 0, 0, 620, 618, 1, 0, 0, 0, 620, 621, 1, 0, 0, 0, 621, 622, 1, 0, 0, 0, 622, 623, 6, 22, 10, 0, 623, 60, 1, 0, 0, 0, 624, 625, 5, 58, 0, 0, 625, 62, 1, 0, 0, 0, 626, 627, 5, 124, 0, 0, 627, 628, 1, 0, 0, 0, 628, 629, 6, 24, 11, 0, 629, 64, 1, 0, 0, 0, 630, 631, 7, 24, 0, 0, 631, 66, 1, 0, 0, 0, 632, 633, 7, 25, 0, 0, 633, 68, 1, 0, 0, 0, 634, 635, 5, 92, 0, 0, 635, 636, 7, 26, 0, 0, 636, 70, 1, 0, 0, 0, 637, 638, 8, 27, 0, 0, 638, 72, 1, 0, 0, 0, 639, 641, 7, 3, 0, 0, 640, 642, 7, 28, 0, 0, 641, 640, 1, 0, 0, 0, 641, 642, 1, 0, 0, 0, 642, 644, 1, 0, 0, 0, 643, 645, 3, 65, 25, 0, 644, 643, 1, 0, 0, 0, 645, 646, 1, 0, 0, 0, 646, 644, 1, 0, 0, 0, 646, 647, 1, 0, 0, 0, 647, 74, 1, 0, 0, 0, 648, 649, 5, 64, 0, 0, 649, 76, 1, 0, 0, 0, 650, 651, 5, 96, 0, 0, 651, 78, 1, 0, 0, 0, 652, 656, 8, 29, 0, 0, 653, 654, 5, 96, 0, 0, 654, 656, 5, 96, 0, 0, 655, 652, 1, 0, 0, 0, 655, 653, 1, 0, 0, 0, 656, 80, 1, 0, 0, 0, 657, 658, 5, 95, 0, 0, 658, 82, 1, 0, 0, 0, 659, 663, 3, 67, 26, 0, 660, 663, 3, 65, 25, 0, 661, 663, 3, 81, 33, 0, 662, 659, 1, 0, 0, 0, 662, 660, 1, 0, 0, 0, 662, 661, 1, 0, 0, 0, 663, 84, 1, 0, 0, 0, 664, 669, 5, 34, 0, 0, 665, 668, 3, 69, 27, 0, 666, 668, 3, 71, 28, 0, 667, 665, 1, 0, 0, 0, 667, 666, 1, 0, 0, 0, 668, 671, 1, 0, 0, 0, 669, 667, 1, 0, 0, 0, 669, 670, 1, 0, 0, 0, 670, 672, 1, 0, 0, 0, 671, 669, 1, 0, 0, 0, 672, 694, 5, 34, 0, 0, 673, 674, 5, 34, 0, 0, 674, 675, 5, 34, 0, 0, 675, 676, 5, 34, 0, 0, 676, 680, 1, 0, 0, 0, 677, 679, 8, 22, 0, 0, 678, 677, 1, 0, 0, 0, 679, 682, 1, 0, 0, 0, 680, 681, 1, 0, 0, 0, 680, 678, 1, 0, 0, 0, 681, 683, 1, 0, 0, 0, 682, 680, 1, 0, 0, 0, 683, 684, 5, 34, 0, 0, 684, 685, 5, 34, 0, 0, 685, 686, 5, 34, 0, 0, 686, 688, 1, 0, 0, 0, 687, 689, 5, 34, 0, 0, 688, 687, 1, 0, 0, 0, 688, 689, 1, 0, 0, 0, 689, 691, 1, 0, 0, 0, 690, 692, 5, 34, 0, 0, 691, 690, 1, 0, 0, 0, 691, 692, 1, 0, 0, 0, 692, 694, 1, 0, 0, 0, 693, 664, 1, 0, 0, 0, 693, 673, 1, 0, 0, 0, 694, 86, 1, 0, 0, 0, 695, 697, 3, 65, 25, 0, 696, 695, 1, 0, 0, 0, 697, 698, 1, 0, 0, 0, 698, 
696, 1, 0, 0, 0, 698, 699, 1, 0, 0, 0, 699, 88, 1, 0, 0, 0, 700, 702, 3, 65, 25, 0, 701, 700, 1, 0, 0, 0, 702, 703, 1, 0, 0, 0, 703, 701, 1, 0, 0, 0, 703, 704, 1, 0, 0, 0, 704, 705, 1, 0, 0, 0, 705, 709, 3, 105, 45, 0, 706, 708, 3, 65, 25, 0, 707, 706, 1, 0, 0, 0, 708, 711, 1, 0, 0, 0, 709, 707, 1, 0, 0, 0, 709, 710, 1, 0, 0, 0, 710, 743, 1, 0, 0, 0, 711, 709, 1, 0, 0, 0, 712, 714, 3, 105, 45, 0, 713, 715, 3, 65, 25, 0, 714, 713, 1, 0, 0, 0, 715, 716, 1, 0, 0, 0, 716, 714, 1, 0, 0, 0, 716, 717, 1, 0, 0, 0, 717, 743, 1, 0, 0, 0, 718, 720, 3, 65, 25, 0, 719, 718, 1, 0, 0, 0, 720, 721, 1, 0, 0, 0, 721, 719, 1, 0, 0, 0, 721, 722, 1, 0, 0, 0, 722, 730, 1, 0, 0, 0, 723, 727, 3, 105, 45, 0, 724, 726, 3, 65, 25, 0, 725, 724, 1, 0, 0, 0, 726, 729, 1, 0, 0, 0, 727, 725, 1, 0, 0, 0, 727, 728, 1, 0, 0, 0, 728, 731, 1, 0, 0, 0, 729, 727, 1, 0, 0, 0, 730, 723, 1, 0, 0, 0, 730, 731, 1, 0, 0, 0, 731, 732, 1, 0, 0, 0, 732, 733, 3, 73, 29, 0, 733, 743, 1, 0, 0, 0, 734, 736, 3, 105, 45, 0, 735, 737, 3, 65, 25, 0, 736, 735, 1, 0, 0, 0, 737, 738, 1, 0, 0, 0, 738, 736, 1, 0, 0, 0, 738, 739, 1, 0, 0, 0, 739, 740, 1, 0, 0, 0, 740, 741, 3, 73, 29, 0, 741, 743, 1, 0, 0, 0, 742, 701, 1, 0, 0, 0, 742, 712, 1, 0, 0, 0, 742, 719, 1, 0, 0, 0, 742, 734, 1, 0, 0, 0, 743, 90, 1, 0, 0, 0, 744, 745, 7, 30, 0, 0, 745, 746, 7, 31, 0, 0, 746, 92, 1, 0, 0, 0, 747, 748, 7, 12, 0, 0, 748, 749, 7, 9, 0, 0, 749, 750, 7, 0, 0, 0, 750, 94, 1, 0, 0, 0, 751, 752, 7, 12, 0, 0, 752, 753, 7, 2, 0, 0, 753, 754, 7, 4, 0, 0, 754, 96, 1, 0, 0, 0, 755, 756, 5, 61, 0, 0, 756, 98, 1, 0, 0, 0, 757, 758, 5, 58, 0, 0, 758, 759, 5, 58, 0, 0, 759, 100, 1, 0, 0, 0, 760, 761, 5, 44, 0, 0, 761, 102, 1, 0, 0, 0, 762, 763, 7, 0, 0, 0, 763, 764, 7, 3, 0, 0, 764, 765, 7, 2, 0, 0, 765, 766, 7, 4, 0, 0, 766, 104, 1, 0, 0, 0, 767, 768, 5, 46, 0, 0, 768, 106, 1, 0, 0, 0, 769, 770, 7, 15, 0, 0, 770, 771, 7, 12, 0, 0, 771, 772, 7, 13, 0, 0, 772, 773, 7, 2, 0, 0, 773, 774, 7, 3, 0, 0, 774, 108, 1, 0, 0, 0, 775, 776, 7, 15, 0, 0, 776, 777, 7, 1, 0, 0, 777, 778, 7, 6, 0, 0, 778, 779, 7, 2, 0, 0, 779, 780, 7, 5, 0, 0, 780, 110, 1, 0, 0, 0, 781, 782, 7, 1, 0, 0, 782, 783, 7, 9, 0, 0, 783, 112, 1, 0, 0, 0, 784, 785, 7, 1, 0, 0, 785, 786, 7, 2, 0, 0, 786, 114, 1, 0, 0, 0, 787, 788, 7, 13, 0, 0, 788, 789, 7, 12, 0, 0, 789, 790, 7, 2, 0, 0, 790, 791, 7, 5, 0, 0, 791, 116, 1, 0, 0, 0, 792, 793, 7, 13, 0, 0, 793, 794, 7, 1, 0, 0, 794, 795, 7, 18, 0, 0, 795, 796, 7, 3, 0, 0, 796, 118, 1, 0, 0, 0, 797, 798, 5, 40, 0, 0, 798, 120, 1, 0, 0, 0, 799, 800, 7, 9, 0, 0, 800, 801, 7, 7, 0, 0, 801, 802, 7, 5, 0, 0, 802, 122, 1, 0, 0, 0, 803, 804, 7, 9, 0, 0, 804, 805, 7, 20, 0, 0, 805, 806, 7, 13, 0, 0, 806, 807, 7, 13, 0, 0, 807, 124, 1, 0, 0, 0, 808, 809, 7, 9, 0, 0, 809, 810, 7, 20, 0, 0, 810, 811, 7, 13, 0, 0, 811, 812, 7, 13, 0, 0, 812, 813, 7, 2, 0, 0, 813, 126, 1, 0, 0, 0, 814, 815, 7, 7, 0, 0, 815, 816, 7, 6, 0, 0, 816, 128, 1, 0, 0, 0, 817, 818, 5, 63, 0, 0, 818, 130, 1, 0, 0, 0, 819, 820, 7, 6, 0, 0, 820, 821, 7, 13, 0, 0, 821, 822, 7, 1, 0, 0, 822, 823, 7, 18, 0, 0, 823, 824, 7, 3, 0, 0, 824, 132, 1, 0, 0, 0, 825, 826, 5, 41, 0, 0, 826, 134, 1, 0, 0, 0, 827, 828, 7, 5, 0, 0, 828, 829, 7, 6, 0, 0, 829, 830, 7, 20, 0, 0, 830, 831, 7, 3, 0, 0, 831, 136, 1, 0, 0, 0, 832, 833, 5, 61, 0, 0, 833, 834, 5, 61, 0, 0, 834, 138, 1, 0, 0, 0, 835, 836, 5, 61, 0, 0, 836, 837, 5, 126, 0, 0, 837, 140, 1, 0, 0, 0, 838, 839, 5, 33, 0, 0, 839, 840, 5, 61, 0, 0, 840, 142, 1, 0, 0, 0, 841, 842, 5, 60, 0, 0, 842, 144, 1, 0, 0, 0, 843, 844, 5, 60, 0, 0, 844, 845, 5, 61, 0, 0, 845, 146, 1, 0, 0, 0, 
846, 847, 5, 62, 0, 0, 847, 148, 1, 0, 0, 0, 848, 849, 5, 62, 0, 0, 849, 850, 5, 61, 0, 0, 850, 150, 1, 0, 0, 0, 851, 852, 5, 43, 0, 0, 852, 152, 1, 0, 0, 0, 853, 854, 5, 45, 0, 0, 854, 154, 1, 0, 0, 0, 855, 856, 5, 42, 0, 0, 856, 156, 1, 0, 0, 0, 857, 858, 5, 47, 0, 0, 858, 158, 1, 0, 0, 0, 859, 860, 5, 37, 0, 0, 860, 160, 1, 0, 0, 0, 861, 862, 4, 73, 3, 0, 862, 863, 3, 61, 23, 0, 863, 864, 1, 0, 0, 0, 864, 865, 6, 73, 12, 0, 865, 162, 1, 0, 0, 0, 866, 867, 3, 45, 15, 0, 867, 868, 1, 0, 0, 0, 868, 869, 6, 74, 13, 0, 869, 164, 1, 0, 0, 0, 870, 873, 3, 129, 57, 0, 871, 874, 3, 67, 26, 0, 872, 874, 3, 81, 33, 0, 873, 871, 1, 0, 0, 0, 873, 872, 1, 0, 0, 0, 874, 878, 1, 0, 0, 0, 875, 877, 3, 83, 34, 0, 876, 875, 1, 0, 0, 0, 877, 880, 1, 0, 0, 0, 878, 876, 1, 0, 0, 0, 878, 879, 1, 0, 0, 0, 879, 888, 1, 0, 0, 0, 880, 878, 1, 0, 0, 0, 881, 883, 3, 129, 57, 0, 882, 884, 3, 65, 25, 0, 883, 882, 1, 0, 0, 0, 884, 885, 1, 0, 0, 0, 885, 883, 1, 0, 0, 0, 885, 886, 1, 0, 0, 0, 886, 888, 1, 0, 0, 0, 887, 870, 1, 0, 0, 0, 887, 881, 1, 0, 0, 0, 888, 166, 1, 0, 0, 0, 889, 890, 5, 91, 0, 0, 890, 891, 1, 0, 0, 0, 891, 892, 6, 76, 0, 0, 892, 893, 6, 76, 0, 0, 893, 168, 1, 0, 0, 0, 894, 895, 5, 93, 0, 0, 895, 896, 1, 0, 0, 0, 896, 897, 6, 77, 11, 0, 897, 898, 6, 77, 11, 0, 898, 170, 1, 0, 0, 0, 899, 903, 3, 67, 26, 0, 900, 902, 3, 83, 34, 0, 901, 900, 1, 0, 0, 0, 902, 905, 1, 0, 0, 0, 903, 901, 1, 0, 0, 0, 903, 904, 1, 0, 0, 0, 904, 916, 1, 0, 0, 0, 905, 903, 1, 0, 0, 0, 906, 909, 3, 81, 33, 0, 907, 909, 3, 75, 30, 0, 908, 906, 1, 0, 0, 0, 908, 907, 1, 0, 0, 0, 909, 911, 1, 0, 0, 0, 910, 912, 3, 83, 34, 0, 911, 910, 1, 0, 0, 0, 912, 913, 1, 0, 0, 0, 913, 911, 1, 0, 0, 0, 913, 914, 1, 0, 0, 0, 914, 916, 1, 0, 0, 0, 915, 899, 1, 0, 0, 0, 915, 908, 1, 0, 0, 0, 916, 172, 1, 0, 0, 0, 917, 919, 3, 77, 31, 0, 918, 920, 3, 79, 32, 0, 919, 918, 1, 0, 0, 0, 920, 921, 1, 0, 0, 0, 921, 919, 1, 0, 0, 0, 921, 922, 1, 0, 0, 0, 922, 923, 1, 0, 0, 0, 923, 924, 3, 77, 31, 0, 924, 174, 1, 0, 0, 0, 925, 926, 3, 173, 79, 0, 926, 176, 1, 0, 0, 0, 927, 928, 3, 55, 20, 0, 928, 929, 1, 0, 0, 0, 929, 930, 6, 81, 10, 0, 930, 178, 1, 0, 0, 0, 931, 932, 3, 57, 21, 0, 932, 933, 1, 0, 0, 0, 933, 934, 6, 82, 10, 0, 934, 180, 1, 0, 0, 0, 935, 936, 3, 59, 22, 0, 936, 937, 1, 0, 0, 0, 937, 938, 6, 83, 10, 0, 938, 182, 1, 0, 0, 0, 939, 940, 3, 167, 76, 0, 940, 941, 1, 0, 0, 0, 941, 942, 6, 84, 14, 0, 942, 943, 6, 84, 15, 0, 943, 184, 1, 0, 0, 0, 944, 945, 3, 63, 24, 0, 945, 946, 1, 0, 0, 0, 946, 947, 6, 85, 16, 0, 947, 948, 6, 85, 11, 0, 948, 186, 1, 0, 0, 0, 949, 950, 3, 59, 22, 0, 950, 951, 1, 0, 0, 0, 951, 952, 6, 86, 10, 0, 952, 188, 1, 0, 0, 0, 953, 954, 3, 55, 20, 0, 954, 955, 1, 0, 0, 0, 955, 956, 6, 87, 10, 0, 956, 190, 1, 0, 0, 0, 957, 958, 3, 57, 21, 0, 958, 959, 1, 0, 0, 0, 959, 960, 6, 88, 10, 0, 960, 192, 1, 0, 0, 0, 961, 962, 3, 63, 24, 0, 962, 963, 1, 0, 0, 0, 963, 964, 6, 89, 16, 0, 964, 965, 6, 89, 11, 0, 965, 194, 1, 0, 0, 0, 966, 967, 3, 167, 76, 0, 967, 968, 1, 0, 0, 0, 968, 969, 6, 90, 14, 0, 969, 196, 1, 0, 0, 0, 970, 971, 3, 169, 77, 0, 971, 972, 1, 0, 0, 0, 972, 973, 6, 91, 17, 0, 973, 198, 1, 0, 0, 0, 974, 975, 3, 61, 23, 0, 975, 976, 1, 0, 0, 0, 976, 977, 6, 92, 12, 0, 977, 200, 1, 0, 0, 0, 978, 979, 3, 101, 43, 0, 979, 980, 1, 0, 0, 0, 980, 981, 6, 93, 18, 0, 981, 202, 1, 0, 0, 0, 982, 983, 3, 97, 41, 0, 983, 984, 1, 0, 0, 0, 984, 985, 6, 94, 19, 0, 985, 204, 1, 0, 0, 0, 986, 987, 7, 16, 0, 0, 987, 988, 7, 3, 0, 0, 988, 989, 7, 5, 0, 0, 989, 990, 7, 12, 0, 0, 990, 991, 7, 0, 0, 0, 991, 992, 7, 12, 0, 0, 992, 993, 7, 5, 
0, 0, 993, 994, 7, 12, 0, 0, 994, 206, 1, 0, 0, 0, 995, 999, 8, 32, 0, 0, 996, 997, 5, 47, 0, 0, 997, 999, 8, 33, 0, 0, 998, 995, 1, 0, 0, 0, 998, 996, 1, 0, 0, 0, 999, 208, 1, 0, 0, 0, 1000, 1002, 3, 207, 96, 0, 1001, 1000, 1, 0, 0, 0, 1002, 1003, 1, 0, 0, 0, 1003, 1001, 1, 0, 0, 0, 1003, 1004, 1, 0, 0, 0, 1004, 210, 1, 0, 0, 0, 1005, 1006, 3, 209, 97, 0, 1006, 1007, 1, 0, 0, 0, 1007, 1008, 6, 98, 20, 0, 1008, 212, 1, 0, 0, 0, 1009, 1010, 3, 85, 35, 0, 1010, 1011, 1, 0, 0, 0, 1011, 1012, 6, 99, 21, 0, 1012, 214, 1, 0, 0, 0, 1013, 1014, 3, 55, 20, 0, 1014, 1015, 1, 0, 0, 0, 1015, 1016, 6, 100, 10, 0, 1016, 216, 1, 0, 0, 0, 1017, 1018, 3, 57, 21, 0, 1018, 1019, 1, 0, 0, 0, 1019, 1020, 6, 101, 10, 0, 1020, 218, 1, 0, 0, 0, 1021, 1022, 3, 59, 22, 0, 1022, 1023, 1, 0, 0, 0, 1023, 1024, 6, 102, 10, 0, 1024, 220, 1, 0, 0, 0, 1025, 1026, 3, 63, 24, 0, 1026, 1027, 1, 0, 0, 0, 1027, 1028, 6, 103, 16, 0, 1028, 1029, 6, 103, 11, 0, 1029, 222, 1, 0, 0, 0, 1030, 1031, 3, 105, 45, 0, 1031, 1032, 1, 0, 0, 0, 1032, 1033, 6, 104, 22, 0, 1033, 224, 1, 0, 0, 0, 1034, 1035, 3, 101, 43, 0, 1035, 1036, 1, 0, 0, 0, 1036, 1037, 6, 105, 18, 0, 1037, 226, 1, 0, 0, 0, 1038, 1039, 4, 106, 4, 0, 1039, 1040, 3, 129, 57, 0, 1040, 1041, 1, 0, 0, 0, 1041, 1042, 6, 106, 23, 0, 1042, 228, 1, 0, 0, 0, 1043, 1044, 4, 107, 5, 0, 1044, 1045, 3, 165, 75, 0, 1045, 1046, 1, 0, 0, 0, 1046, 1047, 6, 107, 24, 0, 1047, 230, 1, 0, 0, 0, 1048, 1053, 3, 67, 26, 0, 1049, 1053, 3, 65, 25, 0, 1050, 1053, 3, 81, 33, 0, 1051, 1053, 3, 155, 70, 0, 1052, 1048, 1, 0, 0, 0, 1052, 1049, 1, 0, 0, 0, 1052, 1050, 1, 0, 0, 0, 1052, 1051, 1, 0, 0, 0, 1053, 232, 1, 0, 0, 0, 1054, 1057, 3, 67, 26, 0, 1055, 1057, 3, 155, 70, 0, 1056, 1054, 1, 0, 0, 0, 1056, 1055, 1, 0, 0, 0, 1057, 1061, 1, 0, 0, 0, 1058, 1060, 3, 231, 108, 0, 1059, 1058, 1, 0, 0, 0, 1060, 1063, 1, 0, 0, 0, 1061, 1059, 1, 0, 0, 0, 1061, 1062, 1, 0, 0, 0, 1062, 1074, 1, 0, 0, 0, 1063, 1061, 1, 0, 0, 0, 1064, 1067, 3, 81, 33, 0, 1065, 1067, 3, 75, 30, 0, 1066, 1064, 1, 0, 0, 0, 1066, 1065, 1, 0, 0, 0, 1067, 1069, 1, 0, 0, 0, 1068, 1070, 3, 231, 108, 0, 1069, 1068, 1, 0, 0, 0, 1070, 1071, 1, 0, 0, 0, 1071, 1069, 1, 0, 0, 0, 1071, 1072, 1, 0, 0, 0, 1072, 1074, 1, 0, 0, 0, 1073, 1056, 1, 0, 0, 0, 1073, 1066, 1, 0, 0, 0, 1074, 234, 1, 0, 0, 0, 1075, 1078, 3, 233, 109, 0, 1076, 1078, 3, 173, 79, 0, 1077, 1075, 1, 0, 0, 0, 1077, 1076, 1, 0, 0, 0, 1078, 1079, 1, 0, 0, 0, 1079, 1077, 1, 0, 0, 0, 1079, 1080, 1, 0, 0, 0, 1080, 236, 1, 0, 0, 0, 1081, 1082, 3, 55, 20, 0, 1082, 1083, 1, 0, 0, 0, 1083, 1084, 6, 111, 10, 0, 1084, 238, 1, 0, 0, 0, 1085, 1086, 3, 57, 21, 0, 1086, 1087, 1, 0, 0, 0, 1087, 1088, 6, 112, 10, 0, 1088, 240, 1, 0, 0, 0, 1089, 1090, 3, 59, 22, 0, 1090, 1091, 1, 0, 0, 0, 1091, 1092, 6, 113, 10, 0, 1092, 242, 1, 0, 0, 0, 1093, 1094, 3, 63, 24, 0, 1094, 1095, 1, 0, 0, 0, 1095, 1096, 6, 114, 16, 0, 1096, 1097, 6, 114, 11, 0, 1097, 244, 1, 0, 0, 0, 1098, 1099, 3, 97, 41, 0, 1099, 1100, 1, 0, 0, 0, 1100, 1101, 6, 115, 19, 0, 1101, 246, 1, 0, 0, 0, 1102, 1103, 3, 101, 43, 0, 1103, 1104, 1, 0, 0, 0, 1104, 1105, 6, 116, 18, 0, 1105, 248, 1, 0, 0, 0, 1106, 1107, 3, 105, 45, 0, 1107, 1108, 1, 0, 0, 0, 1108, 1109, 6, 117, 22, 0, 1109, 250, 1, 0, 0, 0, 1110, 1111, 4, 118, 6, 0, 1111, 1112, 3, 129, 57, 0, 1112, 1113, 1, 0, 0, 0, 1113, 1114, 6, 118, 23, 0, 1114, 252, 1, 0, 0, 0, 1115, 1116, 4, 119, 7, 0, 1116, 1117, 3, 165, 75, 0, 1117, 1118, 1, 0, 0, 0, 1118, 1119, 6, 119, 24, 0, 1119, 254, 1, 0, 0, 0, 1120, 1121, 7, 12, 0, 0, 1121, 1122, 7, 2, 0, 0, 1122, 256, 1, 0, 0, 0, 1123, 1124, 3, 235, 
110, 0, 1124, 1125, 1, 0, 0, 0, 1125, 1126, 6, 121, 25, 0, 1126, 258, 1, 0, 0, 0, 1127, 1128, 3, 55, 20, 0, 1128, 1129, 1, 0, 0, 0, 1129, 1130, 6, 122, 10, 0, 1130, 260, 1, 0, 0, 0, 1131, 1132, 3, 57, 21, 0, 1132, 1133, 1, 0, 0, 0, 1133, 1134, 6, 123, 10, 0, 1134, 262, 1, 0, 0, 0, 1135, 1136, 3, 59, 22, 0, 1136, 1137, 1, 0, 0, 0, 1137, 1138, 6, 124, 10, 0, 1138, 264, 1, 0, 0, 0, 1139, 1140, 3, 63, 24, 0, 1140, 1141, 1, 0, 0, 0, 1141, 1142, 6, 125, 16, 0, 1142, 1143, 6, 125, 11, 0, 1143, 266, 1, 0, 0, 0, 1144, 1145, 3, 167, 76, 0, 1145, 1146, 1, 0, 0, 0, 1146, 1147, 6, 126, 14, 0, 1147, 1148, 6, 126, 26, 0, 1148, 268, 1, 0, 0, 0, 1149, 1150, 7, 7, 0, 0, 1150, 1151, 7, 9, 0, 0, 1151, 1152, 1, 0, 0, 0, 1152, 1153, 6, 127, 27, 0, 1153, 270, 1, 0, 0, 0, 1154, 1155, 7, 19, 0, 0, 1155, 1156, 7, 1, 0, 0, 1156, 1157, 7, 5, 0, 0, 1157, 1158, 7, 10, 0, 0, 1158, 1159, 1, 0, 0, 0, 1159, 1160, 6, 128, 27, 0, 1160, 272, 1, 0, 0, 0, 1161, 1162, 8, 34, 0, 0, 1162, 274, 1, 0, 0, 0, 1163, 1165, 3, 273, 129, 0, 1164, 1163, 1, 0, 0, 0, 1165, 1166, 1, 0, 0, 0, 1166, 1164, 1, 0, 0, 0, 1166, 1167, 1, 0, 0, 0, 1167, 1168, 1, 0, 0, 0, 1168, 1169, 3, 61, 23, 0, 1169, 1171, 1, 0, 0, 0, 1170, 1164, 1, 0, 0, 0, 1170, 1171, 1, 0, 0, 0, 1171, 1173, 1, 0, 0, 0, 1172, 1174, 3, 273, 129, 0, 1173, 1172, 1, 0, 0, 0, 1174, 1175, 1, 0, 0, 0, 1175, 1173, 1, 0, 0, 0, 1175, 1176, 1, 0, 0, 0, 1176, 276, 1, 0, 0, 0, 1177, 1178, 3, 275, 130, 0, 1178, 1179, 1, 0, 0, 0, 1179, 1180, 6, 131, 28, 0, 1180, 278, 1, 0, 0, 0, 1181, 1182, 3, 55, 20, 0, 1182, 1183, 1, 0, 0, 0, 1183, 1184, 6, 132, 10, 0, 1184, 280, 1, 0, 0, 0, 1185, 1186, 3, 57, 21, 0, 1186, 1187, 1, 0, 0, 0, 1187, 1188, 6, 133, 10, 0, 1188, 282, 1, 0, 0, 0, 1189, 1190, 3, 59, 22, 0, 1190, 1191, 1, 0, 0, 0, 1191, 1192, 6, 134, 10, 0, 1192, 284, 1, 0, 0, 0, 1193, 1194, 3, 63, 24, 0, 1194, 1195, 1, 0, 0, 0, 1195, 1196, 6, 135, 16, 0, 1196, 1197, 6, 135, 11, 0, 1197, 1198, 6, 135, 11, 0, 1198, 286, 1, 0, 0, 0, 1199, 1200, 3, 97, 41, 0, 1200, 1201, 1, 0, 0, 0, 1201, 1202, 6, 136, 19, 0, 1202, 288, 1, 0, 0, 0, 1203, 1204, 3, 101, 43, 0, 1204, 1205, 1, 0, 0, 0, 1205, 1206, 6, 137, 18, 0, 1206, 290, 1, 0, 0, 0, 1207, 1208, 3, 105, 45, 0, 1208, 1209, 1, 0, 0, 0, 1209, 1210, 6, 138, 22, 0, 1210, 292, 1, 0, 0, 0, 1211, 1212, 3, 271, 128, 0, 1212, 1213, 1, 0, 0, 0, 1213, 1214, 6, 139, 29, 0, 1214, 294, 1, 0, 0, 0, 1215, 1216, 3, 235, 110, 0, 1216, 1217, 1, 0, 0, 0, 1217, 1218, 6, 140, 25, 0, 1218, 296, 1, 0, 0, 0, 1219, 1220, 3, 175, 80, 0, 1220, 1221, 1, 0, 0, 0, 1221, 1222, 6, 141, 30, 0, 1222, 298, 1, 0, 0, 0, 1223, 1224, 4, 142, 8, 0, 1224, 1225, 3, 129, 57, 0, 1225, 1226, 1, 0, 0, 0, 1226, 1227, 6, 142, 23, 0, 1227, 300, 1, 0, 0, 0, 1228, 1229, 4, 143, 9, 0, 1229, 1230, 3, 165, 75, 0, 1230, 1231, 1, 0, 0, 0, 1231, 1232, 6, 143, 24, 0, 1232, 302, 1, 0, 0, 0, 1233, 1234, 3, 55, 20, 0, 1234, 1235, 1, 0, 0, 0, 1235, 1236, 6, 144, 10, 0, 1236, 304, 1, 0, 0, 0, 1237, 1238, 3, 57, 21, 0, 1238, 1239, 1, 0, 0, 0, 1239, 1240, 6, 145, 10, 0, 1240, 306, 1, 0, 0, 0, 1241, 1242, 3, 59, 22, 0, 1242, 1243, 1, 0, 0, 0, 1243, 1244, 6, 146, 10, 0, 1244, 308, 1, 0, 0, 0, 1245, 1246, 3, 63, 24, 0, 1246, 1247, 1, 0, 0, 0, 1247, 1248, 6, 147, 16, 0, 1248, 1249, 6, 147, 11, 0, 1249, 310, 1, 0, 0, 0, 1250, 1251, 3, 105, 45, 0, 1251, 1252, 1, 0, 0, 0, 1252, 1253, 6, 148, 22, 0, 1253, 312, 1, 0, 0, 0, 1254, 1255, 4, 149, 10, 0, 1255, 1256, 3, 129, 57, 0, 1256, 1257, 1, 0, 0, 0, 1257, 1258, 6, 149, 23, 0, 1258, 314, 1, 0, 0, 0, 1259, 1260, 4, 150, 11, 0, 1260, 1261, 3, 165, 75, 0, 1261, 1262, 1, 0, 0, 0, 1262, 
1263, 6, 150, 24, 0, 1263, 316, 1, 0, 0, 0, 1264, 1265, 3, 175, 80, 0, 1265, 1266, 1, 0, 0, 0, 1266, 1267, 6, 151, 30, 0, 1267, 318, 1, 0, 0, 0, 1268, 1269, 3, 171, 78, 0, 1269, 1270, 1, 0, 0, 0, 1270, 1271, 6, 152, 31, 0, 1271, 320, 1, 0, 0, 0, 1272, 1273, 3, 55, 20, 0, 1273, 1274, 1, 0, 0, 0, 1274, 1275, 6, 153, 10, 0, 1275, 322, 1, 0, 0, 0, 1276, 1277, 3, 57, 21, 0, 1277, 1278, 1, 0, 0, 0, 1278, 1279, 6, 154, 10, 0, 1279, 324, 1, 0, 0, 0, 1280, 1281, 3, 59, 22, 0, 1281, 1282, 1, 0, 0, 0, 1282, 1283, 6, 155, 10, 0, 1283, 326, 1, 0, 0, 0, 1284, 1285, 3, 63, 24, 0, 1285, 1286, 1, 0, 0, 0, 1286, 1287, 6, 156, 16, 0, 1287, 1288, 6, 156, 11, 0, 1288, 328, 1, 0, 0, 0, 1289, 1290, 7, 1, 0, 0, 1290, 1291, 7, 9, 0, 0, 1291, 1292, 7, 15, 0, 0, 1292, 1293, 7, 7, 0, 0, 1293, 330, 1, 0, 0, 0, 1294, 1295, 3, 55, 20, 0, 1295, 1296, 1, 0, 0, 0, 1296, 1297, 6, 158, 10, 0, 1297, 332, 1, 0, 0, 0, 1298, 1299, 3, 57, 21, 0, 1299, 1300, 1, 0, 0, 0, 1300, 1301, 6, 159, 10, 0, 1301, 334, 1, 0, 0, 0, 1302, 1303, 3, 59, 22, 0, 1303, 1304, 1, 0, 0, 0, 1304, 1305, 6, 160, 10, 0, 1305, 336, 1, 0, 0, 0, 1306, 1307, 3, 169, 77, 0, 1307, 1308, 1, 0, 0, 0, 1308, 1309, 6, 161, 17, 0, 1309, 1310, 6, 161, 11, 0, 1310, 338, 1, 0, 0, 0, 1311, 1312, 3, 61, 23, 0, 1312, 1313, 1, 0, 0, 0, 1313, 1314, 6, 162, 12, 0, 1314, 340, 1, 0, 0, 0, 1315, 1321, 3, 75, 30, 0, 1316, 1321, 3, 65, 25, 0, 1317, 1321, 3, 105, 45, 0, 1318, 1321, 3, 67, 26, 0, 1319, 1321, 3, 81, 33, 0, 1320, 1315, 1, 0, 0, 0, 1320, 1316, 1, 0, 0, 0, 1320, 1317, 1, 0, 0, 0, 1320, 1318, 1, 0, 0, 0, 1320, 1319, 1, 0, 0, 0, 1321, 1322, 1, 0, 0, 0, 1322, 1320, 1, 0, 0, 0, 1322, 1323, 1, 0, 0, 0, 1323, 342, 1, 0, 0, 0, 1324, 1325, 3, 55, 20, 0, 1325, 1326, 1, 0, 0, 0, 1326, 1327, 6, 164, 10, 0, 1327, 344, 1, 0, 0, 0, 1328, 1329, 3, 57, 21, 0, 1329, 1330, 1, 0, 0, 0, 1330, 1331, 6, 165, 10, 0, 1331, 346, 1, 0, 0, 0, 1332, 1333, 3, 59, 22, 0, 1333, 1334, 1, 0, 0, 0, 1334, 1335, 6, 166, 10, 0, 1335, 348, 1, 0, 0, 0, 1336, 1337, 3, 63, 24, 0, 1337, 1338, 1, 0, 0, 0, 1338, 1339, 6, 167, 16, 0, 1339, 1340, 6, 167, 11, 0, 1340, 350, 1, 0, 0, 0, 1341, 1342, 3, 61, 23, 0, 1342, 1343, 1, 0, 0, 0, 1343, 1344, 6, 168, 12, 0, 1344, 352, 1, 0, 0, 0, 1345, 1346, 3, 101, 43, 0, 1346, 1347, 1, 0, 0, 0, 1347, 1348, 6, 169, 18, 0, 1348, 354, 1, 0, 0, 0, 1349, 1350, 3, 105, 45, 0, 1350, 1351, 1, 0, 0, 0, 1351, 1352, 6, 170, 22, 0, 1352, 356, 1, 0, 0, 0, 1353, 1354, 3, 269, 127, 0, 1354, 1355, 1, 0, 0, 0, 1355, 1356, 6, 171, 32, 0, 1356, 1357, 6, 171, 33, 0, 1357, 358, 1, 0, 0, 0, 1358, 1359, 3, 209, 97, 0, 1359, 1360, 1, 0, 0, 0, 1360, 1361, 6, 172, 20, 0, 1361, 360, 1, 0, 0, 0, 1362, 1363, 3, 85, 35, 0, 1363, 1364, 1, 0, 0, 0, 1364, 1365, 6, 173, 21, 0, 1365, 362, 1, 0, 0, 0, 1366, 1367, 3, 55, 20, 0, 1367, 1368, 1, 0, 0, 0, 1368, 1369, 6, 174, 10, 0, 1369, 364, 1, 0, 0, 0, 1370, 1371, 3, 57, 21, 0, 1371, 1372, 1, 0, 0, 0, 1372, 1373, 6, 175, 10, 0, 1373, 366, 1, 0, 0, 0, 1374, 1375, 3, 59, 22, 0, 1375, 1376, 1, 0, 0, 0, 1376, 1377, 6, 176, 10, 0, 1377, 368, 1, 0, 0, 0, 1378, 1379, 3, 63, 24, 0, 1379, 1380, 1, 0, 0, 0, 1380, 1381, 6, 177, 16, 0, 1381, 1382, 6, 177, 11, 0, 1382, 1383, 6, 177, 11, 0, 1383, 370, 1, 0, 0, 0, 1384, 1385, 3, 101, 43, 0, 1385, 1386, 1, 0, 0, 0, 1386, 1387, 6, 178, 18, 0, 1387, 372, 1, 0, 0, 0, 1388, 1389, 3, 105, 45, 0, 1389, 1390, 1, 0, 0, 0, 1390, 1391, 6, 179, 22, 0, 1391, 374, 1, 0, 0, 0, 1392, 1393, 3, 235, 110, 0, 1393, 1394, 1, 0, 0, 0, 1394, 1395, 6, 180, 25, 0, 1395, 376, 1, 0, 0, 0, 1396, 1397, 3, 55, 20, 0, 1397, 1398, 1, 0, 0, 0, 1398, 1399, 6, 181, 
10, 0, 1399, 378, 1, 0, 0, 0, 1400, 1401, 3, 57, 21, 0, 1401, 1402, 1, 0, 0, 0, 1402, 1403, 6, 182, 10, 0, 1403, 380, 1, 0, 0, 0, 1404, 1405, 3, 59, 22, 0, 1405, 1406, 1, 0, 0, 0, 1406, 1407, 6, 183, 10, 0, 1407, 382, 1, 0, 0, 0, 1408, 1409, 3, 63, 24, 0, 1409, 1410, 1, 0, 0, 0, 1410, 1411, 6, 184, 16, 0, 1411, 1412, 6, 184, 11, 0, 1412, 384, 1, 0, 0, 0, 1413, 1414, 3, 209, 97, 0, 1414, 1415, 1, 0, 0, 0, 1415, 1416, 6, 185, 20, 0, 1416, 1417, 6, 185, 11, 0, 1417, 1418, 6, 185, 34, 0, 1418, 386, 1, 0, 0, 0, 1419, 1420, 3, 85, 35, 0, 1420, 1421, 1, 0, 0, 0, 1421, 1422, 6, 186, 21, 0, 1422, 1423, 6, 186, 11, 0, 1423, 1424, 6, 186, 34, 0, 1424, 388, 1, 0, 0, 0, 1425, 1426, 3, 55, 20, 0, 1426, 1427, 1, 0, 0, 0, 1427, 1428, 6, 187, 10, 0, 1428, 390, 1, 0, 0, 0, 1429, 1430, 3, 57, 21, 0, 1430, 1431, 1, 0, 0, 0, 1431, 1432, 6, 188, 10, 0, 1432, 392, 1, 0, 0, 0, 1433, 1434, 3, 59, 22, 0, 1434, 1435, 1, 0, 0, 0, 1435, 1436, 6, 189, 10, 0, 1436, 394, 1, 0, 0, 0, 1437, 1438, 3, 61, 23, 0, 1438, 1439, 1, 0, 0, 0, 1439, 1440, 6, 190, 12, 0, 1440, 1441, 6, 190, 11, 0, 1441, 1442, 6, 190, 9, 0, 1442, 396, 1, 0, 0, 0, 1443, 1444, 3, 101, 43, 0, 1444, 1445, 1, 0, 0, 0, 1445, 1446, 6, 191, 18, 0, 1446, 1447, 6, 191, 11, 0, 1447, 1448, 6, 191, 9, 0, 1448, 398, 1, 0, 0, 0, 1449, 1450, 3, 55, 20, 0, 1450, 1451, 1, 0, 0, 0, 1451, 1452, 6, 192, 10, 0, 1452, 400, 1, 0, 0, 0, 1453, 1454, 3, 57, 21, 0, 1454, 1455, 1, 0, 0, 0, 1455, 1456, 6, 193, 10, 0, 1456, 402, 1, 0, 0, 0, 1457, 1458, 3, 59, 22, 0, 1458, 1459, 1, 0, 0, 0, 1459, 1460, 6, 194, 10, 0, 1460, 404, 1, 0, 0, 0, 1461, 1462, 3, 175, 80, 0, 1462, 1463, 1, 0, 0, 0, 1463, 1464, 6, 195, 11, 0, 1464, 1465, 6, 195, 0, 0, 1465, 1466, 6, 195, 30, 0, 1466, 406, 1, 0, 0, 0, 1467, 1468, 3, 171, 78, 0, 1468, 1469, 1, 0, 0, 0, 1469, 1470, 6, 196, 11, 0, 1470, 1471, 6, 196, 0, 0, 1471, 1472, 6, 196, 31, 0, 1472, 408, 1, 0, 0, 0, 1473, 1474, 3, 91, 38, 0, 1474, 1475, 1, 0, 0, 0, 1475, 1476, 6, 197, 11, 0, 1476, 1477, 6, 197, 0, 0, 1477, 1478, 6, 197, 35, 0, 1478, 410, 1, 0, 0, 0, 1479, 1480, 3, 63, 24, 0, 1480, 1481, 1, 0, 0, 0, 1481, 1482, 6, 198, 16, 0, 1482, 1483, 6, 198, 11, 0, 1483, 412, 1, 0, 0, 0, 65, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 581, 591, 595, 598, 607, 609, 620, 641, 646, 655, 662, 667, 669, 680, 688, 691, 693, 698, 703, 709, 716, 721, 727, 730, 738, 742, 873, 878, 885, 887, 903, 908, 913, 915, 921, 998, 1003, 1052, 1056, 1061, 1066, 1071, 1073, 1077, 1079, 1166, 1170, 1175, 1320, 1322, 36, 5, 1, 0, 5, 4, 0, 5, 6, 0, 5, 2, 0, 5, 3, 0, 5, 8, 0, 5, 5, 0, 5, 9, 0, 5, 11, 0, 5, 13, 0, 0, 1, 0, 4, 0, 0, 7, 24, 0, 7, 16, 0, 7, 65, 0, 5, 0, 0, 7, 25, 0, 7, 66, 0, 7, 34, 0, 7, 32, 0, 7, 76, 0, 7, 26, 0, 7, 36, 0, 7, 48, 0, 7, 64, 0, 7, 80, 0, 5, 10, 0, 5, 7, 0, 7, 90, 0, 7, 89, 0, 7, 68, 0, 7, 67, 0, 7, 88, 0, 5, 12, 0, 5, 14, 0, 7, 29, 0] \ No newline at end of file +[4, 0, 128, 1601, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 
41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, 57, 7, 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, 7, 62, 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 2, 66, 7, 66, 2, 67, 7, 67, 2, 68, 7, 68, 2, 69, 7, 69, 2, 70, 7, 70, 2, 71, 7, 71, 2, 72, 7, 72, 2, 73, 7, 73, 2, 74, 7, 74, 2, 75, 7, 75, 2, 76, 7, 76, 2, 77, 7, 77, 2, 78, 7, 78, 2, 79, 7, 79, 2, 80, 7, 80, 2, 81, 7, 81, 2, 82, 7, 82, 2, 83, 7, 83, 2, 84, 7, 84, 2, 85, 7, 85, 2, 86, 7, 86, 2, 87, 7, 87, 2, 88, 7, 88, 2, 89, 7, 89, 2, 90, 7, 90, 2, 91, 7, 91, 2, 92, 7, 92, 2, 93, 7, 93, 2, 94, 7, 94, 2, 95, 7, 95, 2, 96, 7, 96, 2, 97, 7, 97, 2, 98, 7, 98, 2, 99, 7, 99, 2, 100, 7, 100, 2, 101, 7, 101, 2, 102, 7, 102, 2, 103, 7, 103, 2, 104, 7, 104, 2, 105, 7, 105, 2, 106, 7, 106, 2, 107, 7, 107, 2, 108, 7, 108, 2, 109, 7, 109, 2, 110, 7, 110, 2, 111, 7, 111, 2, 112, 7, 112, 2, 113, 7, 113, 2, 114, 7, 114, 2, 115, 7, 115, 2, 116, 7, 116, 2, 117, 7, 117, 2, 118, 7, 118, 2, 119, 7, 119, 2, 120, 7, 120, 2, 121, 7, 121, 2, 122, 7, 122, 2, 123, 7, 123, 2, 124, 7, 124, 2, 125, 7, 125, 2, 126, 7, 126, 2, 127, 7, 127, 2, 128, 7, 128, 2, 129, 7, 129, 2, 130, 7, 130, 2, 131, 7, 131, 2, 132, 7, 132, 2, 133, 7, 133, 2, 134, 7, 134, 2, 135, 7, 135, 2, 136, 7, 136, 2, 137, 7, 137, 2, 138, 7, 138, 2, 139, 7, 139, 2, 140, 7, 140, 2, 141, 7, 141, 2, 142, 7, 142, 2, 143, 7, 143, 2, 144, 7, 144, 2, 145, 7, 145, 2, 146, 7, 146, 2, 147, 7, 147, 2, 148, 7, 148, 2, 149, 7, 149, 2, 150, 7, 150, 2, 151, 7, 151, 2, 152, 7, 152, 2, 153, 7, 153, 2, 154, 7, 154, 2, 155, 7, 155, 2, 156, 7, 156, 2, 157, 7, 157, 2, 158, 7, 158, 2, 159, 7, 159, 2, 160, 7, 160, 2, 161, 7, 161, 2, 162, 7, 162, 2, 163, 7, 163, 2, 164, 7, 164, 2, 165, 7, 165, 2, 166, 7, 166, 2, 167, 7, 167, 2, 168, 7, 168, 2, 169, 7, 169, 2, 170, 7, 170, 2, 171, 7, 171, 2, 172, 7, 172, 2, 173, 7, 173, 2, 174, 7, 174, 2, 175, 7, 175, 2, 176, 7, 176, 2, 177, 7, 177, 2, 178, 7, 178, 2, 179, 7, 179, 2, 180, 7, 180, 2, 181, 7, 181, 2, 182, 7, 182, 2, 183, 7, 183, 2, 184, 7, 184, 2, 185, 7, 185, 2, 186, 7, 186, 2, 187, 7, 187, 2, 188, 7, 188, 2, 189, 7, 189, 2, 190, 7, 190, 2, 191, 7, 191, 2, 192, 7, 192, 2, 193, 7, 193, 2, 194, 7, 194, 2, 195, 7, 195, 2, 196, 7, 196, 2, 197, 7, 197, 2, 198, 7, 198, 2, 199, 7, 199, 2, 200, 7, 200, 2, 201, 7, 201, 2, 202, 7, 202, 2, 203, 7, 203, 2, 204, 7, 204, 2, 205, 7, 205, 2, 206, 7, 206, 2, 207, 7, 207, 2, 208, 7, 208, 2, 209, 7, 209, 2, 210, 7, 210, 2, 211, 7, 211, 2, 212, 7, 212, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 17, 
1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 24, 4, 24, 654, 8, 24, 11, 24, 12, 24, 655, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 5, 25, 664, 8, 25, 10, 25, 12, 25, 667, 9, 25, 1, 25, 3, 25, 670, 8, 25, 1, 25, 3, 25, 673, 8, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 5, 26, 682, 8, 26, 10, 26, 12, 26, 685, 9, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 4, 27, 693, 8, 27, 11, 27, 12, 27, 694, 1, 27, 1, 27, 1, 28, 1, 28, 1, 28, 1, 28, 1, 29, 1, 29, 1, 30, 1, 30, 1, 31, 1, 31, 1, 31, 1, 32, 1, 32, 1, 33, 1, 33, 3, 33, 714, 8, 33, 1, 33, 4, 33, 717, 8, 33, 11, 33, 12, 33, 718, 1, 34, 1, 34, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 3, 36, 728, 8, 36, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 3, 38, 735, 8, 38, 1, 39, 1, 39, 1, 39, 5, 39, 740, 8, 39, 10, 39, 12, 39, 743, 9, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 5, 39, 751, 8, 39, 10, 39, 12, 39, 754, 9, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 3, 39, 761, 8, 39, 1, 39, 3, 39, 764, 8, 39, 3, 39, 766, 8, 39, 1, 40, 4, 40, 769, 8, 40, 11, 40, 12, 40, 770, 1, 41, 4, 41, 774, 8, 41, 11, 41, 12, 41, 775, 1, 41, 1, 41, 5, 41, 780, 8, 41, 10, 41, 12, 41, 783, 9, 41, 1, 41, 1, 41, 4, 41, 787, 8, 41, 11, 41, 12, 41, 788, 1, 41, 4, 41, 792, 8, 41, 11, 41, 12, 41, 793, 1, 41, 1, 41, 5, 41, 798, 8, 41, 10, 41, 12, 41, 801, 9, 41, 3, 41, 803, 8, 41, 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 809, 8, 41, 11, 41, 12, 41, 810, 1, 41, 1, 41, 3, 41, 815, 8, 41, 1, 42, 1, 42, 1, 42, 1, 43, 1, 43, 1, 43, 1, 43, 1, 44, 1, 44, 1, 44, 1, 44, 1, 45, 1, 45, 1, 46, 1, 46, 1, 46, 1, 47, 1, 47, 1, 48, 1, 48, 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 50, 1, 50, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 53, 1, 53, 1, 53, 1, 54, 1, 54, 1, 54, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 57, 1, 57, 1, 58, 1, 58, 1, 58, 1, 58, 1, 59, 1, 59, 1, 59, 1, 59, 1, 59, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 61, 1, 61, 1, 61, 1, 62, 1, 62, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 64, 1, 64, 1, 65, 1, 65, 1, 65, 1, 65, 1, 65, 1, 66, 1, 66, 1, 66, 1, 67, 1, 67, 1, 67, 1, 68, 1, 68, 1, 68, 1, 69, 1, 69, 1, 70, 1, 70, 1, 70, 1, 71, 1, 71, 1, 72, 1, 72, 1, 72, 1, 73, 1, 73, 1, 74, 1, 74, 1, 75, 1, 75, 1, 76, 1, 76, 1, 77, 1, 77, 1, 78, 1, 78, 1, 78, 1, 78, 1, 79, 1, 79, 1, 79, 3, 79, 943, 8, 79, 1, 79, 5, 79, 946, 8, 79, 10, 79, 12, 79, 949, 9, 79, 1, 79, 1, 79, 4, 79, 953, 8, 79, 11, 79, 12, 79, 954, 3, 79, 957, 8, 79, 1, 80, 1, 80, 1, 80, 1, 80, 1, 80, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 82, 1, 82, 5, 82, 971, 8, 82, 10, 82, 12, 82, 974, 9, 82, 1, 82, 1, 82, 3, 82, 978, 8, 82, 1, 82, 4, 82, 981, 8, 82, 11, 82, 12, 82, 982, 3, 82, 985, 8, 82, 1, 83, 1, 83, 4, 83, 989, 8, 83, 11, 83, 12, 83, 990, 1, 83, 1, 83, 1, 84, 1, 84, 1, 85, 1, 85, 1, 85, 1, 85, 1, 86, 1, 86, 1, 86, 1, 86, 1, 87, 1, 87, 1, 87, 1, 87, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 90, 1, 90, 1, 90, 1, 90, 1, 91, 1, 91, 1, 91, 1, 91, 1, 92, 1, 92, 1, 92, 1, 92, 1, 93, 1, 93, 1, 93, 1, 93, 1, 93, 1, 94, 1, 94, 1, 94, 1, 94, 1, 95, 1, 95, 1, 95, 1, 95, 1, 96, 1, 96, 1, 96, 1, 96, 1, 97, 1, 97, 1, 97, 1, 97, 1, 98, 
1, 98, 1, 98, 1, 98, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 100, 1, 100, 1, 100, 3, 100, 1068, 8, 100, 1, 101, 4, 101, 1071, 8, 101, 11, 101, 12, 101, 1072, 1, 102, 1, 102, 1, 102, 1, 102, 1, 103, 1, 103, 1, 103, 1, 103, 1, 104, 1, 104, 1, 104, 1, 104, 1, 105, 1, 105, 1, 105, 1, 105, 1, 106, 1, 106, 1, 106, 1, 106, 1, 107, 1, 107, 1, 107, 1, 107, 1, 107, 1, 108, 1, 108, 1, 108, 1, 108, 1, 109, 1, 109, 1, 109, 1, 109, 1, 110, 1, 110, 1, 110, 1, 110, 1, 110, 1, 111, 1, 111, 1, 111, 1, 111, 1, 111, 1, 112, 1, 112, 1, 112, 1, 112, 3, 112, 1122, 8, 112, 1, 113, 1, 113, 3, 113, 1126, 8, 113, 1, 113, 5, 113, 1129, 8, 113, 10, 113, 12, 113, 1132, 9, 113, 1, 113, 1, 113, 3, 113, 1136, 8, 113, 1, 113, 4, 113, 1139, 8, 113, 11, 113, 12, 113, 1140, 3, 113, 1143, 8, 113, 1, 114, 1, 114, 4, 114, 1147, 8, 114, 11, 114, 12, 114, 1148, 1, 115, 1, 115, 1, 115, 1, 115, 1, 116, 1, 116, 1, 116, 1, 116, 1, 117, 1, 117, 1, 117, 1, 117, 1, 118, 1, 118, 1, 118, 1, 118, 1, 118, 1, 119, 1, 119, 1, 119, 1, 119, 1, 120, 1, 120, 1, 120, 1, 120, 1, 121, 1, 121, 1, 121, 1, 121, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, 123, 1, 123, 1, 123, 1, 123, 1, 123, 1, 124, 1, 124, 1, 124, 1, 125, 1, 125, 1, 125, 1, 125, 1, 126, 1, 126, 1, 126, 1, 126, 1, 127, 1, 127, 1, 127, 1, 127, 1, 128, 1, 128, 1, 128, 1, 128, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 130, 1, 130, 1, 130, 1, 130, 1, 130, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 132, 1, 132, 1, 132, 1, 132, 1, 132, 1, 132, 1, 132, 1, 133, 1, 133, 1, 134, 4, 134, 1234, 8, 134, 11, 134, 12, 134, 1235, 1, 134, 1, 134, 3, 134, 1240, 8, 134, 1, 134, 4, 134, 1243, 8, 134, 11, 134, 12, 134, 1244, 1, 135, 1, 135, 1, 135, 1, 135, 1, 136, 1, 136, 1, 136, 1, 136, 1, 137, 1, 137, 1, 137, 1, 137, 1, 138, 1, 138, 1, 138, 1, 138, 1, 139, 1, 139, 1, 139, 1, 139, 1, 139, 1, 139, 1, 140, 1, 140, 1, 140, 1, 140, 1, 141, 1, 141, 1, 141, 1, 141, 1, 142, 1, 142, 1, 142, 1, 142, 1, 143, 1, 143, 1, 143, 1, 143, 1, 144, 1, 144, 1, 144, 1, 144, 1, 145, 1, 145, 1, 145, 1, 145, 1, 146, 1, 146, 1, 146, 1, 146, 1, 146, 1, 147, 1, 147, 1, 147, 1, 147, 1, 147, 1, 148, 1, 148, 1, 148, 1, 148, 1, 149, 1, 149, 1, 149, 1, 149, 1, 150, 1, 150, 1, 150, 1, 150, 1, 151, 1, 151, 1, 151, 1, 151, 1, 151, 1, 152, 1, 152, 1, 152, 1, 152, 1, 153, 1, 153, 1, 153, 1, 153, 1, 153, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, 155, 1, 155, 1, 155, 1, 155, 1, 156, 1, 156, 1, 156, 1, 156, 1, 157, 1, 157, 1, 157, 1, 157, 1, 158, 1, 158, 1, 158, 1, 158, 1, 159, 1, 159, 1, 159, 1, 159, 1, 160, 1, 160, 1, 160, 1, 160, 1, 160, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 162, 1, 162, 1, 162, 1, 162, 1, 163, 1, 163, 1, 163, 1, 163, 1, 164, 1, 164, 1, 164, 1, 164, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 166, 1, 166, 1, 166, 1, 166, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 4, 167, 1390, 8, 167, 11, 167, 12, 167, 1391, 1, 168, 1, 168, 1, 168, 1, 168, 1, 169, 1, 169, 1, 169, 1, 169, 1, 170, 1, 170, 1, 170, 1, 170, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 172, 1, 172, 1, 172, 1, 172, 1, 173, 1, 173, 1, 173, 1, 173, 1, 174, 1, 174, 1, 174, 1, 174, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 176, 1, 176, 1, 176, 1, 176, 1, 177, 1, 177, 1, 177, 1, 177, 1, 178, 1, 178, 1, 178, 1, 178, 1, 179, 1, 179, 1, 179, 1, 179, 1, 180, 1, 180, 1, 180, 1, 180, 1, 181, 1, 181, 1, 181, 1, 181, 1, 181, 1, 181, 1, 182, 1, 182, 1, 182, 1, 182, 1, 183, 1, 183, 1, 183, 1, 183, 1, 184, 1, 184, 1, 184, 1, 184, 1, 185, 1, 185, 1, 185, 1, 185, 1, 186, 1, 186, 1, 186, 1, 186, 1, 187, 1, 187, 1, 187, 1, 187, 1, 188, 1, 188, 1, 188, 1, 188, 
1, 188, 1, 189, 1, 189, 1, 189, 1, 189, 1, 190, 1, 190, 1, 190, 1, 190, 1, 191, 1, 191, 1, 191, 1, 191, 1, 191, 1, 191, 1, 192, 1, 192, 1, 192, 1, 192, 1, 192, 1, 192, 1, 192, 1, 192, 1, 192, 1, 193, 1, 193, 1, 193, 1, 193, 1, 194, 1, 194, 1, 194, 1, 194, 1, 195, 1, 195, 1, 195, 1, 195, 1, 196, 1, 196, 1, 196, 1, 196, 1, 197, 1, 197, 1, 197, 1, 197, 1, 198, 1, 198, 1, 198, 1, 198, 1, 198, 1, 199, 1, 199, 1, 199, 1, 199, 1, 199, 1, 199, 1, 200, 1, 200, 1, 200, 1, 200, 1, 200, 1, 200, 1, 201, 1, 201, 1, 201, 1, 201, 1, 202, 1, 202, 1, 202, 1, 202, 1, 203, 1, 203, 1, 203, 1, 203, 1, 204, 1, 204, 1, 204, 1, 204, 1, 204, 1, 204, 1, 205, 1, 205, 1, 205, 1, 205, 1, 205, 1, 205, 1, 206, 1, 206, 1, 206, 1, 206, 1, 207, 1, 207, 1, 207, 1, 207, 1, 208, 1, 208, 1, 208, 1, 208, 1, 209, 1, 209, 1, 209, 1, 209, 1, 209, 1, 209, 1, 210, 1, 210, 1, 210, 1, 210, 1, 210, 1, 210, 1, 211, 1, 211, 1, 211, 1, 211, 1, 211, 1, 211, 1, 212, 1, 212, 1, 212, 1, 212, 1, 212, 2, 683, 752, 0, 213, 16, 1, 18, 2, 20, 3, 22, 4, 24, 5, 26, 6, 28, 7, 30, 8, 32, 9, 34, 10, 36, 11, 38, 12, 40, 13, 42, 14, 44, 15, 46, 16, 48, 17, 50, 18, 52, 19, 54, 20, 56, 21, 58, 22, 60, 23, 62, 24, 64, 25, 66, 26, 68, 27, 70, 28, 72, 29, 74, 0, 76, 0, 78, 0, 80, 0, 82, 0, 84, 0, 86, 0, 88, 0, 90, 0, 92, 0, 94, 30, 96, 31, 98, 32, 100, 33, 102, 34, 104, 35, 106, 36, 108, 37, 110, 38, 112, 39, 114, 40, 116, 41, 118, 42, 120, 43, 122, 44, 124, 45, 126, 46, 128, 47, 130, 48, 132, 49, 134, 50, 136, 51, 138, 52, 140, 53, 142, 54, 144, 55, 146, 56, 148, 57, 150, 58, 152, 59, 154, 60, 156, 61, 158, 62, 160, 63, 162, 64, 164, 65, 166, 66, 168, 67, 170, 68, 172, 0, 174, 69, 176, 70, 178, 71, 180, 72, 182, 0, 184, 73, 186, 74, 188, 75, 190, 76, 192, 0, 194, 0, 196, 77, 198, 78, 200, 79, 202, 0, 204, 0, 206, 0, 208, 0, 210, 0, 212, 0, 214, 80, 216, 0, 218, 81, 220, 0, 222, 0, 224, 82, 226, 83, 228, 84, 230, 0, 232, 0, 234, 0, 236, 0, 238, 0, 240, 0, 242, 0, 244, 85, 246, 86, 248, 87, 250, 88, 252, 0, 254, 0, 256, 0, 258, 0, 260, 0, 262, 0, 264, 89, 266, 0, 268, 90, 270, 91, 272, 92, 274, 0, 276, 0, 278, 93, 280, 94, 282, 0, 284, 95, 286, 0, 288, 96, 290, 97, 292, 98, 294, 0, 296, 0, 298, 0, 300, 0, 302, 0, 304, 0, 306, 0, 308, 0, 310, 0, 312, 99, 314, 100, 316, 101, 318, 0, 320, 0, 322, 0, 324, 0, 326, 0, 328, 0, 330, 102, 332, 103, 334, 104, 336, 0, 338, 105, 340, 106, 342, 107, 344, 108, 346, 0, 348, 0, 350, 109, 352, 110, 354, 111, 356, 112, 358, 0, 360, 0, 362, 0, 364, 0, 366, 0, 368, 0, 370, 0, 372, 113, 374, 114, 376, 115, 378, 0, 380, 0, 382, 0, 384, 0, 386, 116, 388, 117, 390, 118, 392, 0, 394, 0, 396, 0, 398, 0, 400, 119, 402, 0, 404, 0, 406, 120, 408, 121, 410, 122, 412, 0, 414, 0, 416, 0, 418, 123, 420, 124, 422, 125, 424, 0, 426, 0, 428, 126, 430, 127, 432, 128, 434, 0, 436, 0, 438, 0, 440, 0, 16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 36, 2, 0, 68, 68, 100, 100, 2, 0, 73, 73, 105, 105, 2, 0, 83, 83, 115, 115, 2, 0, 69, 69, 101, 101, 2, 0, 67, 67, 99, 99, 2, 0, 84, 84, 116, 116, 2, 0, 82, 82, 114, 114, 2, 0, 79, 79, 111, 111, 2, 0, 80, 80, 112, 112, 2, 0, 78, 78, 110, 110, 2, 0, 72, 72, 104, 104, 2, 0, 86, 86, 118, 118, 2, 0, 65, 65, 97, 97, 2, 0, 76, 76, 108, 108, 2, 0, 88, 88, 120, 120, 2, 0, 70, 70, 102, 102, 2, 0, 77, 77, 109, 109, 2, 0, 71, 71, 103, 103, 2, 0, 75, 75, 107, 107, 2, 0, 87, 87, 119, 119, 2, 0, 85, 85, 117, 117, 2, 0, 74, 74, 106, 106, 6, 0, 9, 10, 13, 13, 32, 32, 47, 47, 91, 91, 93, 93, 2, 0, 10, 10, 13, 13, 3, 0, 9, 10, 13, 13, 32, 32, 1, 0, 48, 57, 2, 0, 65, 90, 97, 122, 8, 0, 34, 34, 78, 78, 82, 82, 84, 
84, 92, 92, 110, 110, 114, 114, 116, 116, 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 2, 0, 43, 43, 45, 45, 1, 0, 96, 96, 2, 0, 66, 66, 98, 98, 2, 0, 89, 89, 121, 121, 11, 0, 9, 10, 13, 13, 32, 32, 34, 34, 44, 44, 47, 47, 58, 58, 61, 61, 91, 91, 93, 93, 124, 124, 2, 0, 42, 42, 47, 47, 11, 0, 9, 10, 13, 13, 32, 32, 34, 35, 44, 44, 47, 47, 58, 58, 60, 60, 62, 63, 92, 92, 124, 124, 1628, 0, 16, 1, 0, 0, 0, 0, 18, 1, 0, 0, 0, 0, 20, 1, 0, 0, 0, 0, 22, 1, 0, 0, 0, 0, 24, 1, 0, 0, 0, 0, 26, 1, 0, 0, 0, 0, 28, 1, 0, 0, 0, 0, 30, 1, 0, 0, 0, 0, 32, 1, 0, 0, 0, 0, 34, 1, 0, 0, 0, 0, 36, 1, 0, 0, 0, 0, 38, 1, 0, 0, 0, 0, 40, 1, 0, 0, 0, 0, 42, 1, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 46, 1, 0, 0, 0, 0, 48, 1, 0, 0, 0, 0, 50, 1, 0, 0, 0, 0, 52, 1, 0, 0, 0, 0, 54, 1, 0, 0, 0, 0, 56, 1, 0, 0, 0, 0, 58, 1, 0, 0, 0, 0, 60, 1, 0, 0, 0, 0, 62, 1, 0, 0, 0, 0, 64, 1, 0, 0, 0, 0, 66, 1, 0, 0, 0, 0, 68, 1, 0, 0, 0, 0, 70, 1, 0, 0, 0, 1, 72, 1, 0, 0, 0, 1, 94, 1, 0, 0, 0, 1, 96, 1, 0, 0, 0, 1, 98, 1, 0, 0, 0, 1, 100, 1, 0, 0, 0, 1, 102, 1, 0, 0, 0, 1, 104, 1, 0, 0, 0, 1, 106, 1, 0, 0, 0, 1, 108, 1, 0, 0, 0, 1, 110, 1, 0, 0, 0, 1, 112, 1, 0, 0, 0, 1, 114, 1, 0, 0, 0, 1, 116, 1, 0, 0, 0, 1, 118, 1, 0, 0, 0, 1, 120, 1, 0, 0, 0, 1, 122, 1, 0, 0, 0, 1, 124, 1, 0, 0, 0, 1, 126, 1, 0, 0, 0, 1, 128, 1, 0, 0, 0, 1, 130, 1, 0, 0, 0, 1, 132, 1, 0, 0, 0, 1, 134, 1, 0, 0, 0, 1, 136, 1, 0, 0, 0, 1, 138, 1, 0, 0, 0, 1, 140, 1, 0, 0, 0, 1, 142, 1, 0, 0, 0, 1, 144, 1, 0, 0, 0, 1, 146, 1, 0, 0, 0, 1, 148, 1, 0, 0, 0, 1, 150, 1, 0, 0, 0, 1, 152, 1, 0, 0, 0, 1, 154, 1, 0, 0, 0, 1, 156, 1, 0, 0, 0, 1, 158, 1, 0, 0, 0, 1, 160, 1, 0, 0, 0, 1, 162, 1, 0, 0, 0, 1, 164, 1, 0, 0, 0, 1, 166, 1, 0, 0, 0, 1, 168, 1, 0, 0, 0, 1, 170, 1, 0, 0, 0, 1, 172, 1, 0, 0, 0, 1, 174, 1, 0, 0, 0, 1, 176, 1, 0, 0, 0, 1, 178, 1, 0, 0, 0, 1, 180, 1, 0, 0, 0, 1, 184, 1, 0, 0, 0, 1, 186, 1, 0, 0, 0, 1, 188, 1, 0, 0, 0, 1, 190, 1, 0, 0, 0, 2, 192, 1, 0, 0, 0, 2, 194, 1, 0, 0, 0, 2, 196, 1, 0, 0, 0, 2, 198, 1, 0, 0, 0, 2, 200, 1, 0, 0, 0, 3, 202, 1, 0, 0, 0, 3, 204, 1, 0, 0, 0, 3, 206, 1, 0, 0, 0, 3, 208, 1, 0, 0, 0, 3, 210, 1, 0, 0, 0, 3, 212, 1, 0, 0, 0, 3, 214, 1, 0, 0, 0, 3, 218, 1, 0, 0, 0, 3, 220, 1, 0, 0, 0, 3, 222, 1, 0, 0, 0, 3, 224, 1, 0, 0, 0, 3, 226, 1, 0, 0, 0, 3, 228, 1, 0, 0, 0, 4, 230, 1, 0, 0, 0, 4, 232, 1, 0, 0, 0, 4, 234, 1, 0, 0, 0, 4, 236, 1, 0, 0, 0, 4, 238, 1, 0, 0, 0, 4, 244, 1, 0, 0, 0, 4, 246, 1, 0, 0, 0, 4, 248, 1, 0, 0, 0, 4, 250, 1, 0, 0, 0, 5, 252, 1, 0, 0, 0, 5, 254, 1, 0, 0, 0, 5, 256, 1, 0, 0, 0, 5, 258, 1, 0, 0, 0, 5, 260, 1, 0, 0, 0, 5, 262, 1, 0, 0, 0, 5, 264, 1, 0, 0, 0, 5, 266, 1, 0, 0, 0, 5, 268, 1, 0, 0, 0, 5, 270, 1, 0, 0, 0, 5, 272, 1, 0, 0, 0, 6, 274, 1, 0, 0, 0, 6, 276, 1, 0, 0, 0, 6, 278, 1, 0, 0, 0, 6, 280, 1, 0, 0, 0, 6, 284, 1, 0, 0, 0, 6, 286, 1, 0, 0, 0, 6, 288, 1, 0, 0, 0, 6, 290, 1, 0, 0, 0, 6, 292, 1, 0, 0, 0, 7, 294, 1, 0, 0, 0, 7, 296, 1, 0, 0, 0, 7, 298, 1, 0, 0, 0, 7, 300, 1, 0, 0, 0, 7, 302, 1, 0, 0, 0, 7, 304, 1, 0, 0, 0, 7, 306, 1, 0, 0, 0, 7, 308, 1, 0, 0, 0, 7, 310, 1, 0, 0, 0, 7, 312, 1, 0, 0, 0, 7, 314, 1, 0, 0, 0, 7, 316, 1, 0, 0, 0, 8, 318, 1, 0, 0, 0, 8, 320, 1, 0, 0, 0, 8, 322, 1, 0, 0, 0, 8, 324, 1, 0, 0, 0, 8, 326, 1, 0, 0, 0, 8, 328, 1, 0, 0, 0, 8, 330, 1, 0, 0, 0, 8, 332, 1, 0, 0, 0, 8, 334, 1, 0, 0, 0, 9, 336, 1, 0, 0, 0, 9, 338, 1, 0, 0, 0, 9, 340, 1, 0, 0, 0, 9, 342, 1, 0, 0, 0, 9, 344, 1, 0, 0, 0, 10, 346, 1, 0, 0, 0, 10, 348, 1, 0, 0, 0, 10, 350, 1, 0, 0, 0, 10, 352, 1, 0, 0, 0, 10, 354, 1, 0, 0, 0, 10, 356, 1, 0, 0, 0, 11, 358, 1, 0, 0, 0, 11, 360, 1, 0, 0, 0, 11, 362, 1, 0, 0, 0, 11, 364, 1, 0, 
0, 0, 11, 366, 1, 0, 0, 0, 11, 368, 1, 0, 0, 0, 11, 370, 1, 0, 0, 0, 11, 372, 1, 0, 0, 0, 11, 374, 1, 0, 0, 0, 11, 376, 1, 0, 0, 0, 12, 378, 1, 0, 0, 0, 12, 380, 1, 0, 0, 0, 12, 382, 1, 0, 0, 0, 12, 384, 1, 0, 0, 0, 12, 386, 1, 0, 0, 0, 12, 388, 1, 0, 0, 0, 12, 390, 1, 0, 0, 0, 13, 392, 1, 0, 0, 0, 13, 394, 1, 0, 0, 0, 13, 396, 1, 0, 0, 0, 13, 398, 1, 0, 0, 0, 13, 400, 1, 0, 0, 0, 13, 402, 1, 0, 0, 0, 13, 404, 1, 0, 0, 0, 13, 406, 1, 0, 0, 0, 13, 408, 1, 0, 0, 0, 13, 410, 1, 0, 0, 0, 14, 412, 1, 0, 0, 0, 14, 414, 1, 0, 0, 0, 14, 416, 1, 0, 0, 0, 14, 418, 1, 0, 0, 0, 14, 420, 1, 0, 0, 0, 14, 422, 1, 0, 0, 0, 15, 424, 1, 0, 0, 0, 15, 426, 1, 0, 0, 0, 15, 428, 1, 0, 0, 0, 15, 430, 1, 0, 0, 0, 15, 432, 1, 0, 0, 0, 15, 434, 1, 0, 0, 0, 15, 436, 1, 0, 0, 0, 15, 438, 1, 0, 0, 0, 15, 440, 1, 0, 0, 0, 16, 442, 1, 0, 0, 0, 18, 452, 1, 0, 0, 0, 20, 459, 1, 0, 0, 0, 22, 468, 1, 0, 0, 0, 24, 475, 1, 0, 0, 0, 26, 485, 1, 0, 0, 0, 28, 492, 1, 0, 0, 0, 30, 499, 1, 0, 0, 0, 32, 506, 1, 0, 0, 0, 34, 514, 1, 0, 0, 0, 36, 526, 1, 0, 0, 0, 38, 535, 1, 0, 0, 0, 40, 541, 1, 0, 0, 0, 42, 548, 1, 0, 0, 0, 44, 555, 1, 0, 0, 0, 46, 563, 1, 0, 0, 0, 48, 571, 1, 0, 0, 0, 50, 586, 1, 0, 0, 0, 52, 598, 1, 0, 0, 0, 54, 609, 1, 0, 0, 0, 56, 617, 1, 0, 0, 0, 58, 625, 1, 0, 0, 0, 60, 633, 1, 0, 0, 0, 62, 642, 1, 0, 0, 0, 64, 653, 1, 0, 0, 0, 66, 659, 1, 0, 0, 0, 68, 676, 1, 0, 0, 0, 70, 692, 1, 0, 0, 0, 72, 698, 1, 0, 0, 0, 74, 702, 1, 0, 0, 0, 76, 704, 1, 0, 0, 0, 78, 706, 1, 0, 0, 0, 80, 709, 1, 0, 0, 0, 82, 711, 1, 0, 0, 0, 84, 720, 1, 0, 0, 0, 86, 722, 1, 0, 0, 0, 88, 727, 1, 0, 0, 0, 90, 729, 1, 0, 0, 0, 92, 734, 1, 0, 0, 0, 94, 765, 1, 0, 0, 0, 96, 768, 1, 0, 0, 0, 98, 814, 1, 0, 0, 0, 100, 816, 1, 0, 0, 0, 102, 819, 1, 0, 0, 0, 104, 823, 1, 0, 0, 0, 106, 827, 1, 0, 0, 0, 108, 829, 1, 0, 0, 0, 110, 832, 1, 0, 0, 0, 112, 834, 1, 0, 0, 0, 114, 836, 1, 0, 0, 0, 116, 841, 1, 0, 0, 0, 118, 843, 1, 0, 0, 0, 120, 849, 1, 0, 0, 0, 122, 855, 1, 0, 0, 0, 124, 858, 1, 0, 0, 0, 126, 861, 1, 0, 0, 0, 128, 866, 1, 0, 0, 0, 130, 871, 1, 0, 0, 0, 132, 873, 1, 0, 0, 0, 134, 877, 1, 0, 0, 0, 136, 882, 1, 0, 0, 0, 138, 888, 1, 0, 0, 0, 140, 891, 1, 0, 0, 0, 142, 893, 1, 0, 0, 0, 144, 899, 1, 0, 0, 0, 146, 901, 1, 0, 0, 0, 148, 906, 1, 0, 0, 0, 150, 909, 1, 0, 0, 0, 152, 912, 1, 0, 0, 0, 154, 915, 1, 0, 0, 0, 156, 917, 1, 0, 0, 0, 158, 920, 1, 0, 0, 0, 160, 922, 1, 0, 0, 0, 162, 925, 1, 0, 0, 0, 164, 927, 1, 0, 0, 0, 166, 929, 1, 0, 0, 0, 168, 931, 1, 0, 0, 0, 170, 933, 1, 0, 0, 0, 172, 935, 1, 0, 0, 0, 174, 956, 1, 0, 0, 0, 176, 958, 1, 0, 0, 0, 178, 963, 1, 0, 0, 0, 180, 984, 1, 0, 0, 0, 182, 986, 1, 0, 0, 0, 184, 994, 1, 0, 0, 0, 186, 996, 1, 0, 0, 0, 188, 1000, 1, 0, 0, 0, 190, 1004, 1, 0, 0, 0, 192, 1008, 1, 0, 0, 0, 194, 1013, 1, 0, 0, 0, 196, 1018, 1, 0, 0, 0, 198, 1022, 1, 0, 0, 0, 200, 1026, 1, 0, 0, 0, 202, 1030, 1, 0, 0, 0, 204, 1035, 1, 0, 0, 0, 206, 1039, 1, 0, 0, 0, 208, 1043, 1, 0, 0, 0, 210, 1047, 1, 0, 0, 0, 212, 1051, 1, 0, 0, 0, 214, 1055, 1, 0, 0, 0, 216, 1067, 1, 0, 0, 0, 218, 1070, 1, 0, 0, 0, 220, 1074, 1, 0, 0, 0, 222, 1078, 1, 0, 0, 0, 224, 1082, 1, 0, 0, 0, 226, 1086, 1, 0, 0, 0, 228, 1090, 1, 0, 0, 0, 230, 1094, 1, 0, 0, 0, 232, 1099, 1, 0, 0, 0, 234, 1103, 1, 0, 0, 0, 236, 1107, 1, 0, 0, 0, 238, 1112, 1, 0, 0, 0, 240, 1121, 1, 0, 0, 0, 242, 1142, 1, 0, 0, 0, 244, 1146, 1, 0, 0, 0, 246, 1150, 1, 0, 0, 0, 248, 1154, 1, 0, 0, 0, 250, 1158, 1, 0, 0, 0, 252, 1162, 1, 0, 0, 0, 254, 1167, 1, 0, 0, 0, 256, 1171, 1, 0, 0, 0, 258, 1175, 1, 0, 0, 0, 260, 1179, 1, 0, 0, 0, 262, 1184, 1, 0, 0, 0, 264, 1189, 1, 0, 0, 0, 
266, 1192, 1, 0, 0, 0, 268, 1196, 1, 0, 0, 0, 270, 1200, 1, 0, 0, 0, 272, 1204, 1, 0, 0, 0, 274, 1208, 1, 0, 0, 0, 276, 1213, 1, 0, 0, 0, 278, 1218, 1, 0, 0, 0, 280, 1223, 1, 0, 0, 0, 282, 1230, 1, 0, 0, 0, 284, 1239, 1, 0, 0, 0, 286, 1246, 1, 0, 0, 0, 288, 1250, 1, 0, 0, 0, 290, 1254, 1, 0, 0, 0, 292, 1258, 1, 0, 0, 0, 294, 1262, 1, 0, 0, 0, 296, 1268, 1, 0, 0, 0, 298, 1272, 1, 0, 0, 0, 300, 1276, 1, 0, 0, 0, 302, 1280, 1, 0, 0, 0, 304, 1284, 1, 0, 0, 0, 306, 1288, 1, 0, 0, 0, 308, 1292, 1, 0, 0, 0, 310, 1297, 1, 0, 0, 0, 312, 1302, 1, 0, 0, 0, 314, 1306, 1, 0, 0, 0, 316, 1310, 1, 0, 0, 0, 318, 1314, 1, 0, 0, 0, 320, 1319, 1, 0, 0, 0, 322, 1323, 1, 0, 0, 0, 324, 1328, 1, 0, 0, 0, 326, 1333, 1, 0, 0, 0, 328, 1337, 1, 0, 0, 0, 330, 1341, 1, 0, 0, 0, 332, 1345, 1, 0, 0, 0, 334, 1349, 1, 0, 0, 0, 336, 1353, 1, 0, 0, 0, 338, 1358, 1, 0, 0, 0, 340, 1363, 1, 0, 0, 0, 342, 1367, 1, 0, 0, 0, 344, 1371, 1, 0, 0, 0, 346, 1375, 1, 0, 0, 0, 348, 1380, 1, 0, 0, 0, 350, 1389, 1, 0, 0, 0, 352, 1393, 1, 0, 0, 0, 354, 1397, 1, 0, 0, 0, 356, 1401, 1, 0, 0, 0, 358, 1405, 1, 0, 0, 0, 360, 1410, 1, 0, 0, 0, 362, 1414, 1, 0, 0, 0, 364, 1418, 1, 0, 0, 0, 366, 1422, 1, 0, 0, 0, 368, 1427, 1, 0, 0, 0, 370, 1431, 1, 0, 0, 0, 372, 1435, 1, 0, 0, 0, 374, 1439, 1, 0, 0, 0, 376, 1443, 1, 0, 0, 0, 378, 1447, 1, 0, 0, 0, 380, 1453, 1, 0, 0, 0, 382, 1457, 1, 0, 0, 0, 384, 1461, 1, 0, 0, 0, 386, 1465, 1, 0, 0, 0, 388, 1469, 1, 0, 0, 0, 390, 1473, 1, 0, 0, 0, 392, 1477, 1, 0, 0, 0, 394, 1482, 1, 0, 0, 0, 396, 1486, 1, 0, 0, 0, 398, 1490, 1, 0, 0, 0, 400, 1496, 1, 0, 0, 0, 402, 1505, 1, 0, 0, 0, 404, 1509, 1, 0, 0, 0, 406, 1513, 1, 0, 0, 0, 408, 1517, 1, 0, 0, 0, 410, 1521, 1, 0, 0, 0, 412, 1525, 1, 0, 0, 0, 414, 1530, 1, 0, 0, 0, 416, 1536, 1, 0, 0, 0, 418, 1542, 1, 0, 0, 0, 420, 1546, 1, 0, 0, 0, 422, 1550, 1, 0, 0, 0, 424, 1554, 1, 0, 0, 0, 426, 1560, 1, 0, 0, 0, 428, 1566, 1, 0, 0, 0, 430, 1570, 1, 0, 0, 0, 432, 1574, 1, 0, 0, 0, 434, 1578, 1, 0, 0, 0, 436, 1584, 1, 0, 0, 0, 438, 1590, 1, 0, 0, 0, 440, 1596, 1, 0, 0, 0, 442, 443, 7, 0, 0, 0, 443, 444, 7, 1, 0, 0, 444, 445, 7, 2, 0, 0, 445, 446, 7, 2, 0, 0, 446, 447, 7, 3, 0, 0, 447, 448, 7, 4, 0, 0, 448, 449, 7, 5, 0, 0, 449, 450, 1, 0, 0, 0, 450, 451, 6, 0, 0, 0, 451, 17, 1, 0, 0, 0, 452, 453, 7, 0, 0, 0, 453, 454, 7, 6, 0, 0, 454, 455, 7, 7, 0, 0, 455, 456, 7, 8, 0, 0, 456, 457, 1, 0, 0, 0, 457, 458, 6, 1, 1, 0, 458, 19, 1, 0, 0, 0, 459, 460, 7, 3, 0, 0, 460, 461, 7, 9, 0, 0, 461, 462, 7, 6, 0, 0, 462, 463, 7, 1, 0, 0, 463, 464, 7, 4, 0, 0, 464, 465, 7, 10, 0, 0, 465, 466, 1, 0, 0, 0, 466, 467, 6, 2, 2, 0, 467, 21, 1, 0, 0, 0, 468, 469, 7, 3, 0, 0, 469, 470, 7, 11, 0, 0, 470, 471, 7, 12, 0, 0, 471, 472, 7, 13, 0, 0, 472, 473, 1, 0, 0, 0, 473, 474, 6, 3, 0, 0, 474, 23, 1, 0, 0, 0, 475, 476, 7, 3, 0, 0, 476, 477, 7, 14, 0, 0, 477, 478, 7, 8, 0, 0, 478, 479, 7, 13, 0, 0, 479, 480, 7, 12, 0, 0, 480, 481, 7, 1, 0, 0, 481, 482, 7, 9, 0, 0, 482, 483, 1, 0, 0, 0, 483, 484, 6, 4, 3, 0, 484, 25, 1, 0, 0, 0, 485, 486, 7, 15, 0, 0, 486, 487, 7, 6, 0, 0, 487, 488, 7, 7, 0, 0, 488, 489, 7, 16, 0, 0, 489, 490, 1, 0, 0, 0, 490, 491, 6, 5, 4, 0, 491, 27, 1, 0, 0, 0, 492, 493, 7, 17, 0, 0, 493, 494, 7, 6, 0, 0, 494, 495, 7, 7, 0, 0, 495, 496, 7, 18, 0, 0, 496, 497, 1, 0, 0, 0, 497, 498, 6, 6, 0, 0, 498, 29, 1, 0, 0, 0, 499, 500, 7, 18, 0, 0, 500, 501, 7, 3, 0, 0, 501, 502, 7, 3, 0, 0, 502, 503, 7, 8, 0, 0, 503, 504, 1, 0, 0, 0, 504, 505, 6, 7, 1, 0, 505, 31, 1, 0, 0, 0, 506, 507, 7, 13, 0, 0, 507, 508, 7, 1, 0, 0, 508, 509, 7, 16, 0, 0, 509, 510, 7, 1, 0, 0, 510, 511, 7, 5, 0, 0, 511, 
512, 1, 0, 0, 0, 512, 513, 6, 8, 0, 0, 513, 33, 1, 0, 0, 0, 514, 515, 7, 16, 0, 0, 515, 516, 7, 11, 0, 0, 516, 517, 5, 95, 0, 0, 517, 518, 7, 3, 0, 0, 518, 519, 7, 14, 0, 0, 519, 520, 7, 8, 0, 0, 520, 521, 7, 12, 0, 0, 521, 522, 7, 9, 0, 0, 522, 523, 7, 0, 0, 0, 523, 524, 1, 0, 0, 0, 524, 525, 6, 9, 5, 0, 525, 35, 1, 0, 0, 0, 526, 527, 7, 6, 0, 0, 527, 528, 7, 3, 0, 0, 528, 529, 7, 9, 0, 0, 529, 530, 7, 12, 0, 0, 530, 531, 7, 16, 0, 0, 531, 532, 7, 3, 0, 0, 532, 533, 1, 0, 0, 0, 533, 534, 6, 10, 6, 0, 534, 37, 1, 0, 0, 0, 535, 536, 7, 6, 0, 0, 536, 537, 7, 7, 0, 0, 537, 538, 7, 19, 0, 0, 538, 539, 1, 0, 0, 0, 539, 540, 6, 11, 0, 0, 540, 39, 1, 0, 0, 0, 541, 542, 7, 2, 0, 0, 542, 543, 7, 10, 0, 0, 543, 544, 7, 7, 0, 0, 544, 545, 7, 19, 0, 0, 545, 546, 1, 0, 0, 0, 546, 547, 6, 12, 7, 0, 547, 41, 1, 0, 0, 0, 548, 549, 7, 2, 0, 0, 549, 550, 7, 7, 0, 0, 550, 551, 7, 6, 0, 0, 551, 552, 7, 5, 0, 0, 552, 553, 1, 0, 0, 0, 553, 554, 6, 13, 0, 0, 554, 43, 1, 0, 0, 0, 555, 556, 7, 2, 0, 0, 556, 557, 7, 5, 0, 0, 557, 558, 7, 12, 0, 0, 558, 559, 7, 5, 0, 0, 559, 560, 7, 2, 0, 0, 560, 561, 1, 0, 0, 0, 561, 562, 6, 14, 0, 0, 562, 45, 1, 0, 0, 0, 563, 564, 7, 19, 0, 0, 564, 565, 7, 10, 0, 0, 565, 566, 7, 3, 0, 0, 566, 567, 7, 6, 0, 0, 567, 568, 7, 3, 0, 0, 568, 569, 1, 0, 0, 0, 569, 570, 6, 15, 0, 0, 570, 47, 1, 0, 0, 0, 571, 572, 4, 16, 0, 0, 572, 573, 7, 1, 0, 0, 573, 574, 7, 9, 0, 0, 574, 575, 7, 13, 0, 0, 575, 576, 7, 1, 0, 0, 576, 577, 7, 9, 0, 0, 577, 578, 7, 3, 0, 0, 578, 579, 7, 2, 0, 0, 579, 580, 7, 5, 0, 0, 580, 581, 7, 12, 0, 0, 581, 582, 7, 5, 0, 0, 582, 583, 7, 2, 0, 0, 583, 584, 1, 0, 0, 0, 584, 585, 6, 16, 0, 0, 585, 49, 1, 0, 0, 0, 586, 587, 4, 17, 1, 0, 587, 588, 7, 13, 0, 0, 588, 589, 7, 7, 0, 0, 589, 590, 7, 7, 0, 0, 590, 591, 7, 18, 0, 0, 591, 592, 7, 20, 0, 0, 592, 593, 7, 8, 0, 0, 593, 594, 5, 95, 0, 0, 594, 595, 5, 128020, 0, 0, 595, 596, 1, 0, 0, 0, 596, 597, 6, 17, 8, 0, 597, 51, 1, 0, 0, 0, 598, 599, 4, 18, 2, 0, 599, 600, 7, 16, 0, 0, 600, 601, 7, 3, 0, 0, 601, 602, 7, 5, 0, 0, 602, 603, 7, 6, 0, 0, 603, 604, 7, 1, 0, 0, 604, 605, 7, 4, 0, 0, 605, 606, 7, 2, 0, 0, 606, 607, 1, 0, 0, 0, 607, 608, 6, 18, 9, 0, 608, 53, 1, 0, 0, 0, 609, 610, 4, 19, 3, 0, 610, 611, 7, 21, 0, 0, 611, 612, 7, 7, 0, 0, 612, 613, 7, 1, 0, 0, 613, 614, 7, 9, 0, 0, 614, 615, 1, 0, 0, 0, 615, 616, 6, 19, 10, 0, 616, 55, 1, 0, 0, 0, 617, 618, 4, 20, 4, 0, 618, 619, 7, 15, 0, 0, 619, 620, 7, 20, 0, 0, 620, 621, 7, 13, 0, 0, 621, 622, 7, 13, 0, 0, 622, 623, 1, 0, 0, 0, 623, 624, 6, 20, 10, 0, 624, 57, 1, 0, 0, 0, 625, 626, 4, 21, 5, 0, 626, 627, 7, 13, 0, 0, 627, 628, 7, 3, 0, 0, 628, 629, 7, 15, 0, 0, 629, 630, 7, 5, 0, 0, 630, 631, 1, 0, 0, 0, 631, 632, 6, 21, 10, 0, 632, 59, 1, 0, 0, 0, 633, 634, 4, 22, 6, 0, 634, 635, 7, 6, 0, 0, 635, 636, 7, 1, 0, 0, 636, 637, 7, 17, 0, 0, 637, 638, 7, 10, 0, 0, 638, 639, 7, 5, 0, 0, 639, 640, 1, 0, 0, 0, 640, 641, 6, 22, 10, 0, 641, 61, 1, 0, 0, 0, 642, 643, 4, 23, 7, 0, 643, 644, 7, 13, 0, 0, 644, 645, 7, 7, 0, 0, 645, 646, 7, 7, 0, 0, 646, 647, 7, 18, 0, 0, 647, 648, 7, 20, 0, 0, 648, 649, 7, 8, 0, 0, 649, 650, 1, 0, 0, 0, 650, 651, 6, 23, 10, 0, 651, 63, 1, 0, 0, 0, 652, 654, 8, 22, 0, 0, 653, 652, 1, 0, 0, 0, 654, 655, 1, 0, 0, 0, 655, 653, 1, 0, 0, 0, 655, 656, 1, 0, 0, 0, 656, 657, 1, 0, 0, 0, 657, 658, 6, 24, 0, 0, 658, 65, 1, 0, 0, 0, 659, 660, 5, 47, 0, 0, 660, 661, 5, 47, 0, 0, 661, 665, 1, 0, 0, 0, 662, 664, 8, 23, 0, 0, 663, 662, 1, 0, 0, 0, 664, 667, 1, 0, 0, 0, 665, 663, 1, 0, 0, 0, 665, 666, 1, 0, 0, 0, 666, 669, 1, 0, 0, 0, 667, 665, 1, 0, 0, 0, 668, 670, 
5, 13, 0, 0, 669, 668, 1, 0, 0, 0, 669, 670, 1, 0, 0, 0, 670, 672, 1, 0, 0, 0, 671, 673, 5, 10, 0, 0, 672, 671, 1, 0, 0, 0, 672, 673, 1, 0, 0, 0, 673, 674, 1, 0, 0, 0, 674, 675, 6, 25, 11, 0, 675, 67, 1, 0, 0, 0, 676, 677, 5, 47, 0, 0, 677, 678, 5, 42, 0, 0, 678, 683, 1, 0, 0, 0, 679, 682, 3, 68, 26, 0, 680, 682, 9, 0, 0, 0, 681, 679, 1, 0, 0, 0, 681, 680, 1, 0, 0, 0, 682, 685, 1, 0, 0, 0, 683, 684, 1, 0, 0, 0, 683, 681, 1, 0, 0, 0, 684, 686, 1, 0, 0, 0, 685, 683, 1, 0, 0, 0, 686, 687, 5, 42, 0, 0, 687, 688, 5, 47, 0, 0, 688, 689, 1, 0, 0, 0, 689, 690, 6, 26, 11, 0, 690, 69, 1, 0, 0, 0, 691, 693, 7, 24, 0, 0, 692, 691, 1, 0, 0, 0, 693, 694, 1, 0, 0, 0, 694, 692, 1, 0, 0, 0, 694, 695, 1, 0, 0, 0, 695, 696, 1, 0, 0, 0, 696, 697, 6, 27, 11, 0, 697, 71, 1, 0, 0, 0, 698, 699, 5, 124, 0, 0, 699, 700, 1, 0, 0, 0, 700, 701, 6, 28, 12, 0, 701, 73, 1, 0, 0, 0, 702, 703, 7, 25, 0, 0, 703, 75, 1, 0, 0, 0, 704, 705, 7, 26, 0, 0, 705, 77, 1, 0, 0, 0, 706, 707, 5, 92, 0, 0, 707, 708, 7, 27, 0, 0, 708, 79, 1, 0, 0, 0, 709, 710, 8, 28, 0, 0, 710, 81, 1, 0, 0, 0, 711, 713, 7, 3, 0, 0, 712, 714, 7, 29, 0, 0, 713, 712, 1, 0, 0, 0, 713, 714, 1, 0, 0, 0, 714, 716, 1, 0, 0, 0, 715, 717, 3, 74, 29, 0, 716, 715, 1, 0, 0, 0, 717, 718, 1, 0, 0, 0, 718, 716, 1, 0, 0, 0, 718, 719, 1, 0, 0, 0, 719, 83, 1, 0, 0, 0, 720, 721, 5, 64, 0, 0, 721, 85, 1, 0, 0, 0, 722, 723, 5, 96, 0, 0, 723, 87, 1, 0, 0, 0, 724, 728, 8, 30, 0, 0, 725, 726, 5, 96, 0, 0, 726, 728, 5, 96, 0, 0, 727, 724, 1, 0, 0, 0, 727, 725, 1, 0, 0, 0, 728, 89, 1, 0, 0, 0, 729, 730, 5, 95, 0, 0, 730, 91, 1, 0, 0, 0, 731, 735, 3, 76, 30, 0, 732, 735, 3, 74, 29, 0, 733, 735, 3, 90, 37, 0, 734, 731, 1, 0, 0, 0, 734, 732, 1, 0, 0, 0, 734, 733, 1, 0, 0, 0, 735, 93, 1, 0, 0, 0, 736, 741, 5, 34, 0, 0, 737, 740, 3, 78, 31, 0, 738, 740, 3, 80, 32, 0, 739, 737, 1, 0, 0, 0, 739, 738, 1, 0, 0, 0, 740, 743, 1, 0, 0, 0, 741, 739, 1, 0, 0, 0, 741, 742, 1, 0, 0, 0, 742, 744, 1, 0, 0, 0, 743, 741, 1, 0, 0, 0, 744, 766, 5, 34, 0, 0, 745, 746, 5, 34, 0, 0, 746, 747, 5, 34, 0, 0, 747, 748, 5, 34, 0, 0, 748, 752, 1, 0, 0, 0, 749, 751, 8, 23, 0, 0, 750, 749, 1, 0, 0, 0, 751, 754, 1, 0, 0, 0, 752, 753, 1, 0, 0, 0, 752, 750, 1, 0, 0, 0, 753, 755, 1, 0, 0, 0, 754, 752, 1, 0, 0, 0, 755, 756, 5, 34, 0, 0, 756, 757, 5, 34, 0, 0, 757, 758, 5, 34, 0, 0, 758, 760, 1, 0, 0, 0, 759, 761, 5, 34, 0, 0, 760, 759, 1, 0, 0, 0, 760, 761, 1, 0, 0, 0, 761, 763, 1, 0, 0, 0, 762, 764, 5, 34, 0, 0, 763, 762, 1, 0, 0, 0, 763, 764, 1, 0, 0, 0, 764, 766, 1, 0, 0, 0, 765, 736, 1, 0, 0, 0, 765, 745, 1, 0, 0, 0, 766, 95, 1, 0, 0, 0, 767, 769, 3, 74, 29, 0, 768, 767, 1, 0, 0, 0, 769, 770, 1, 0, 0, 0, 770, 768, 1, 0, 0, 0, 770, 771, 1, 0, 0, 0, 771, 97, 1, 0, 0, 0, 772, 774, 3, 74, 29, 0, 773, 772, 1, 0, 0, 0, 774, 775, 1, 0, 0, 0, 775, 773, 1, 0, 0, 0, 775, 776, 1, 0, 0, 0, 776, 777, 1, 0, 0, 0, 777, 781, 3, 116, 50, 0, 778, 780, 3, 74, 29, 0, 779, 778, 1, 0, 0, 0, 780, 783, 1, 0, 0, 0, 781, 779, 1, 0, 0, 0, 781, 782, 1, 0, 0, 0, 782, 815, 1, 0, 0, 0, 783, 781, 1, 0, 0, 0, 784, 786, 3, 116, 50, 0, 785, 787, 3, 74, 29, 0, 786, 785, 1, 0, 0, 0, 787, 788, 1, 0, 0, 0, 788, 786, 1, 0, 0, 0, 788, 789, 1, 0, 0, 0, 789, 815, 1, 0, 0, 0, 790, 792, 3, 74, 29, 0, 791, 790, 1, 0, 0, 0, 792, 793, 1, 0, 0, 0, 793, 791, 1, 0, 0, 0, 793, 794, 1, 0, 0, 0, 794, 802, 1, 0, 0, 0, 795, 799, 3, 116, 50, 0, 796, 798, 3, 74, 29, 0, 797, 796, 1, 0, 0, 0, 798, 801, 1, 0, 0, 0, 799, 797, 1, 0, 0, 0, 799, 800, 1, 0, 0, 0, 800, 803, 1, 0, 0, 0, 801, 799, 1, 0, 0, 0, 802, 795, 1, 0, 0, 0, 802, 803, 1, 0, 0, 0, 803, 804, 1, 0, 0, 0, 804, 
805, 3, 82, 33, 0, 805, 815, 1, 0, 0, 0, 806, 808, 3, 116, 50, 0, 807, 809, 3, 74, 29, 0, 808, 807, 1, 0, 0, 0, 809, 810, 1, 0, 0, 0, 810, 808, 1, 0, 0, 0, 810, 811, 1, 0, 0, 0, 811, 812, 1, 0, 0, 0, 812, 813, 3, 82, 33, 0, 813, 815, 1, 0, 0, 0, 814, 773, 1, 0, 0, 0, 814, 784, 1, 0, 0, 0, 814, 791, 1, 0, 0, 0, 814, 806, 1, 0, 0, 0, 815, 99, 1, 0, 0, 0, 816, 817, 7, 31, 0, 0, 817, 818, 7, 32, 0, 0, 818, 101, 1, 0, 0, 0, 819, 820, 7, 12, 0, 0, 820, 821, 7, 9, 0, 0, 821, 822, 7, 0, 0, 0, 822, 103, 1, 0, 0, 0, 823, 824, 7, 12, 0, 0, 824, 825, 7, 2, 0, 0, 825, 826, 7, 4, 0, 0, 826, 105, 1, 0, 0, 0, 827, 828, 5, 61, 0, 0, 828, 107, 1, 0, 0, 0, 829, 830, 5, 58, 0, 0, 830, 831, 5, 58, 0, 0, 831, 109, 1, 0, 0, 0, 832, 833, 5, 58, 0, 0, 833, 111, 1, 0, 0, 0, 834, 835, 5, 44, 0, 0, 835, 113, 1, 0, 0, 0, 836, 837, 7, 0, 0, 0, 837, 838, 7, 3, 0, 0, 838, 839, 7, 2, 0, 0, 839, 840, 7, 4, 0, 0, 840, 115, 1, 0, 0, 0, 841, 842, 5, 46, 0, 0, 842, 117, 1, 0, 0, 0, 843, 844, 7, 15, 0, 0, 844, 845, 7, 12, 0, 0, 845, 846, 7, 13, 0, 0, 846, 847, 7, 2, 0, 0, 847, 848, 7, 3, 0, 0, 848, 119, 1, 0, 0, 0, 849, 850, 7, 15, 0, 0, 850, 851, 7, 1, 0, 0, 851, 852, 7, 6, 0, 0, 852, 853, 7, 2, 0, 0, 853, 854, 7, 5, 0, 0, 854, 121, 1, 0, 0, 0, 855, 856, 7, 1, 0, 0, 856, 857, 7, 9, 0, 0, 857, 123, 1, 0, 0, 0, 858, 859, 7, 1, 0, 0, 859, 860, 7, 2, 0, 0, 860, 125, 1, 0, 0, 0, 861, 862, 7, 13, 0, 0, 862, 863, 7, 12, 0, 0, 863, 864, 7, 2, 0, 0, 864, 865, 7, 5, 0, 0, 865, 127, 1, 0, 0, 0, 866, 867, 7, 13, 0, 0, 867, 868, 7, 1, 0, 0, 868, 869, 7, 18, 0, 0, 869, 870, 7, 3, 0, 0, 870, 129, 1, 0, 0, 0, 871, 872, 5, 40, 0, 0, 872, 131, 1, 0, 0, 0, 873, 874, 7, 9, 0, 0, 874, 875, 7, 7, 0, 0, 875, 876, 7, 5, 0, 0, 876, 133, 1, 0, 0, 0, 877, 878, 7, 9, 0, 0, 878, 879, 7, 20, 0, 0, 879, 880, 7, 13, 0, 0, 880, 881, 7, 13, 0, 0, 881, 135, 1, 0, 0, 0, 882, 883, 7, 9, 0, 0, 883, 884, 7, 20, 0, 0, 884, 885, 7, 13, 0, 0, 885, 886, 7, 13, 0, 0, 886, 887, 7, 2, 0, 0, 887, 137, 1, 0, 0, 0, 888, 889, 7, 7, 0, 0, 889, 890, 7, 6, 0, 0, 890, 139, 1, 0, 0, 0, 891, 892, 5, 63, 0, 0, 892, 141, 1, 0, 0, 0, 893, 894, 7, 6, 0, 0, 894, 895, 7, 13, 0, 0, 895, 896, 7, 1, 0, 0, 896, 897, 7, 18, 0, 0, 897, 898, 7, 3, 0, 0, 898, 143, 1, 0, 0, 0, 899, 900, 5, 41, 0, 0, 900, 145, 1, 0, 0, 0, 901, 902, 7, 5, 0, 0, 902, 903, 7, 6, 0, 0, 903, 904, 7, 20, 0, 0, 904, 905, 7, 3, 0, 0, 905, 147, 1, 0, 0, 0, 906, 907, 5, 61, 0, 0, 907, 908, 5, 61, 0, 0, 908, 149, 1, 0, 0, 0, 909, 910, 5, 61, 0, 0, 910, 911, 5, 126, 0, 0, 911, 151, 1, 0, 0, 0, 912, 913, 5, 33, 0, 0, 913, 914, 5, 61, 0, 0, 914, 153, 1, 0, 0, 0, 915, 916, 5, 60, 0, 0, 916, 155, 1, 0, 0, 0, 917, 918, 5, 60, 0, 0, 918, 919, 5, 61, 0, 0, 919, 157, 1, 0, 0, 0, 920, 921, 5, 62, 0, 0, 921, 159, 1, 0, 0, 0, 922, 923, 5, 62, 0, 0, 923, 924, 5, 61, 0, 0, 924, 161, 1, 0, 0, 0, 925, 926, 5, 43, 0, 0, 926, 163, 1, 0, 0, 0, 927, 928, 5, 45, 0, 0, 928, 165, 1, 0, 0, 0, 929, 930, 5, 42, 0, 0, 930, 167, 1, 0, 0, 0, 931, 932, 5, 47, 0, 0, 932, 169, 1, 0, 0, 0, 933, 934, 5, 37, 0, 0, 934, 171, 1, 0, 0, 0, 935, 936, 3, 46, 15, 0, 936, 937, 1, 0, 0, 0, 937, 938, 6, 78, 13, 0, 938, 173, 1, 0, 0, 0, 939, 942, 3, 140, 62, 0, 940, 943, 3, 76, 30, 0, 941, 943, 3, 90, 37, 0, 942, 940, 1, 0, 0, 0, 942, 941, 1, 0, 0, 0, 943, 947, 1, 0, 0, 0, 944, 946, 3, 92, 38, 0, 945, 944, 1, 0, 0, 0, 946, 949, 1, 0, 0, 0, 947, 945, 1, 0, 0, 0, 947, 948, 1, 0, 0, 0, 948, 957, 1, 0, 0, 0, 949, 947, 1, 0, 0, 0, 950, 952, 3, 140, 62, 0, 951, 953, 3, 74, 29, 0, 952, 951, 1, 0, 0, 0, 953, 954, 1, 0, 0, 0, 954, 952, 1, 0, 0, 0, 954, 955, 1, 0, 0, 0, 955, 957, 
1, 0, 0, 0, 956, 939, 1, 0, 0, 0, 956, 950, 1, 0, 0, 0, 957, 175, 1, 0, 0, 0, 958, 959, 5, 91, 0, 0, 959, 960, 1, 0, 0, 0, 960, 961, 6, 80, 0, 0, 961, 962, 6, 80, 0, 0, 962, 177, 1, 0, 0, 0, 963, 964, 5, 93, 0, 0, 964, 965, 1, 0, 0, 0, 965, 966, 6, 81, 12, 0, 966, 967, 6, 81, 12, 0, 967, 179, 1, 0, 0, 0, 968, 972, 3, 76, 30, 0, 969, 971, 3, 92, 38, 0, 970, 969, 1, 0, 0, 0, 971, 974, 1, 0, 0, 0, 972, 970, 1, 0, 0, 0, 972, 973, 1, 0, 0, 0, 973, 985, 1, 0, 0, 0, 974, 972, 1, 0, 0, 0, 975, 978, 3, 90, 37, 0, 976, 978, 3, 84, 34, 0, 977, 975, 1, 0, 0, 0, 977, 976, 1, 0, 0, 0, 978, 980, 1, 0, 0, 0, 979, 981, 3, 92, 38, 0, 980, 979, 1, 0, 0, 0, 981, 982, 1, 0, 0, 0, 982, 980, 1, 0, 0, 0, 982, 983, 1, 0, 0, 0, 983, 985, 1, 0, 0, 0, 984, 968, 1, 0, 0, 0, 984, 977, 1, 0, 0, 0, 985, 181, 1, 0, 0, 0, 986, 988, 3, 86, 35, 0, 987, 989, 3, 88, 36, 0, 988, 987, 1, 0, 0, 0, 989, 990, 1, 0, 0, 0, 990, 988, 1, 0, 0, 0, 990, 991, 1, 0, 0, 0, 991, 992, 1, 0, 0, 0, 992, 993, 3, 86, 35, 0, 993, 183, 1, 0, 0, 0, 994, 995, 3, 182, 83, 0, 995, 185, 1, 0, 0, 0, 996, 997, 3, 66, 25, 0, 997, 998, 1, 0, 0, 0, 998, 999, 6, 85, 11, 0, 999, 187, 1, 0, 0, 0, 1000, 1001, 3, 68, 26, 0, 1001, 1002, 1, 0, 0, 0, 1002, 1003, 6, 86, 11, 0, 1003, 189, 1, 0, 0, 0, 1004, 1005, 3, 70, 27, 0, 1005, 1006, 1, 0, 0, 0, 1006, 1007, 6, 87, 11, 0, 1007, 191, 1, 0, 0, 0, 1008, 1009, 3, 176, 80, 0, 1009, 1010, 1, 0, 0, 0, 1010, 1011, 6, 88, 14, 0, 1011, 1012, 6, 88, 15, 0, 1012, 193, 1, 0, 0, 0, 1013, 1014, 3, 72, 28, 0, 1014, 1015, 1, 0, 0, 0, 1015, 1016, 6, 89, 16, 0, 1016, 1017, 6, 89, 12, 0, 1017, 195, 1, 0, 0, 0, 1018, 1019, 3, 70, 27, 0, 1019, 1020, 1, 0, 0, 0, 1020, 1021, 6, 90, 11, 0, 1021, 197, 1, 0, 0, 0, 1022, 1023, 3, 66, 25, 0, 1023, 1024, 1, 0, 0, 0, 1024, 1025, 6, 91, 11, 0, 1025, 199, 1, 0, 0, 0, 1026, 1027, 3, 68, 26, 0, 1027, 1028, 1, 0, 0, 0, 1028, 1029, 6, 92, 11, 0, 1029, 201, 1, 0, 0, 0, 1030, 1031, 3, 72, 28, 0, 1031, 1032, 1, 0, 0, 0, 1032, 1033, 6, 93, 16, 0, 1033, 1034, 6, 93, 12, 0, 1034, 203, 1, 0, 0, 0, 1035, 1036, 3, 176, 80, 0, 1036, 1037, 1, 0, 0, 0, 1037, 1038, 6, 94, 14, 0, 1038, 205, 1, 0, 0, 0, 1039, 1040, 3, 178, 81, 0, 1040, 1041, 1, 0, 0, 0, 1041, 1042, 6, 95, 17, 0, 1042, 207, 1, 0, 0, 0, 1043, 1044, 3, 110, 47, 0, 1044, 1045, 1, 0, 0, 0, 1045, 1046, 6, 96, 18, 0, 1046, 209, 1, 0, 0, 0, 1047, 1048, 3, 112, 48, 0, 1048, 1049, 1, 0, 0, 0, 1049, 1050, 6, 97, 19, 0, 1050, 211, 1, 0, 0, 0, 1051, 1052, 3, 106, 45, 0, 1052, 1053, 1, 0, 0, 0, 1053, 1054, 6, 98, 20, 0, 1054, 213, 1, 0, 0, 0, 1055, 1056, 7, 16, 0, 0, 1056, 1057, 7, 3, 0, 0, 1057, 1058, 7, 5, 0, 0, 1058, 1059, 7, 12, 0, 0, 1059, 1060, 7, 0, 0, 0, 1060, 1061, 7, 12, 0, 0, 1061, 1062, 7, 5, 0, 0, 1062, 1063, 7, 12, 0, 0, 1063, 215, 1, 0, 0, 0, 1064, 1068, 8, 33, 0, 0, 1065, 1066, 5, 47, 0, 0, 1066, 1068, 8, 34, 0, 0, 1067, 1064, 1, 0, 0, 0, 1067, 1065, 1, 0, 0, 0, 1068, 217, 1, 0, 0, 0, 1069, 1071, 3, 216, 100, 0, 1070, 1069, 1, 0, 0, 0, 1071, 1072, 1, 0, 0, 0, 1072, 1070, 1, 0, 0, 0, 1072, 1073, 1, 0, 0, 0, 1073, 219, 1, 0, 0, 0, 1074, 1075, 3, 218, 101, 0, 1075, 1076, 1, 0, 0, 0, 1076, 1077, 6, 102, 21, 0, 1077, 221, 1, 0, 0, 0, 1078, 1079, 3, 94, 39, 0, 1079, 1080, 1, 0, 0, 0, 1080, 1081, 6, 103, 22, 0, 1081, 223, 1, 0, 0, 0, 1082, 1083, 3, 66, 25, 0, 1083, 1084, 1, 0, 0, 0, 1084, 1085, 6, 104, 11, 0, 1085, 225, 1, 0, 0, 0, 1086, 1087, 3, 68, 26, 0, 1087, 1088, 1, 0, 0, 0, 1088, 1089, 6, 105, 11, 0, 1089, 227, 1, 0, 0, 0, 1090, 1091, 3, 70, 27, 0, 1091, 1092, 1, 0, 0, 0, 1092, 1093, 6, 106, 11, 0, 1093, 229, 1, 0, 0, 0, 1094, 1095, 3, 72, 28, 0, 
1095, 1096, 1, 0, 0, 0, 1096, 1097, 6, 107, 16, 0, 1097, 1098, 6, 107, 12, 0, 1098, 231, 1, 0, 0, 0, 1099, 1100, 3, 116, 50, 0, 1100, 1101, 1, 0, 0, 0, 1101, 1102, 6, 108, 23, 0, 1102, 233, 1, 0, 0, 0, 1103, 1104, 3, 112, 48, 0, 1104, 1105, 1, 0, 0, 0, 1105, 1106, 6, 109, 19, 0, 1106, 235, 1, 0, 0, 0, 1107, 1108, 4, 110, 8, 0, 1108, 1109, 3, 140, 62, 0, 1109, 1110, 1, 0, 0, 0, 1110, 1111, 6, 110, 24, 0, 1111, 237, 1, 0, 0, 0, 1112, 1113, 4, 111, 9, 0, 1113, 1114, 3, 174, 79, 0, 1114, 1115, 1, 0, 0, 0, 1115, 1116, 6, 111, 25, 0, 1116, 239, 1, 0, 0, 0, 1117, 1122, 3, 76, 30, 0, 1118, 1122, 3, 74, 29, 0, 1119, 1122, 3, 90, 37, 0, 1120, 1122, 3, 166, 75, 0, 1121, 1117, 1, 0, 0, 0, 1121, 1118, 1, 0, 0, 0, 1121, 1119, 1, 0, 0, 0, 1121, 1120, 1, 0, 0, 0, 1122, 241, 1, 0, 0, 0, 1123, 1126, 3, 76, 30, 0, 1124, 1126, 3, 166, 75, 0, 1125, 1123, 1, 0, 0, 0, 1125, 1124, 1, 0, 0, 0, 1126, 1130, 1, 0, 0, 0, 1127, 1129, 3, 240, 112, 0, 1128, 1127, 1, 0, 0, 0, 1129, 1132, 1, 0, 0, 0, 1130, 1128, 1, 0, 0, 0, 1130, 1131, 1, 0, 0, 0, 1131, 1143, 1, 0, 0, 0, 1132, 1130, 1, 0, 0, 0, 1133, 1136, 3, 90, 37, 0, 1134, 1136, 3, 84, 34, 0, 1135, 1133, 1, 0, 0, 0, 1135, 1134, 1, 0, 0, 0, 1136, 1138, 1, 0, 0, 0, 1137, 1139, 3, 240, 112, 0, 1138, 1137, 1, 0, 0, 0, 1139, 1140, 1, 0, 0, 0, 1140, 1138, 1, 0, 0, 0, 1140, 1141, 1, 0, 0, 0, 1141, 1143, 1, 0, 0, 0, 1142, 1125, 1, 0, 0, 0, 1142, 1135, 1, 0, 0, 0, 1143, 243, 1, 0, 0, 0, 1144, 1147, 3, 242, 113, 0, 1145, 1147, 3, 182, 83, 0, 1146, 1144, 1, 0, 0, 0, 1146, 1145, 1, 0, 0, 0, 1147, 1148, 1, 0, 0, 0, 1148, 1146, 1, 0, 0, 0, 1148, 1149, 1, 0, 0, 0, 1149, 245, 1, 0, 0, 0, 1150, 1151, 3, 66, 25, 0, 1151, 1152, 1, 0, 0, 0, 1152, 1153, 6, 115, 11, 0, 1153, 247, 1, 0, 0, 0, 1154, 1155, 3, 68, 26, 0, 1155, 1156, 1, 0, 0, 0, 1156, 1157, 6, 116, 11, 0, 1157, 249, 1, 0, 0, 0, 1158, 1159, 3, 70, 27, 0, 1159, 1160, 1, 0, 0, 0, 1160, 1161, 6, 117, 11, 0, 1161, 251, 1, 0, 0, 0, 1162, 1163, 3, 72, 28, 0, 1163, 1164, 1, 0, 0, 0, 1164, 1165, 6, 118, 16, 0, 1165, 1166, 6, 118, 12, 0, 1166, 253, 1, 0, 0, 0, 1167, 1168, 3, 106, 45, 0, 1168, 1169, 1, 0, 0, 0, 1169, 1170, 6, 119, 20, 0, 1170, 255, 1, 0, 0, 0, 1171, 1172, 3, 112, 48, 0, 1172, 1173, 1, 0, 0, 0, 1173, 1174, 6, 120, 19, 0, 1174, 257, 1, 0, 0, 0, 1175, 1176, 3, 116, 50, 0, 1176, 1177, 1, 0, 0, 0, 1177, 1178, 6, 121, 23, 0, 1178, 259, 1, 0, 0, 0, 1179, 1180, 4, 122, 10, 0, 1180, 1181, 3, 140, 62, 0, 1181, 1182, 1, 0, 0, 0, 1182, 1183, 6, 122, 24, 0, 1183, 261, 1, 0, 0, 0, 1184, 1185, 4, 123, 11, 0, 1185, 1186, 3, 174, 79, 0, 1186, 1187, 1, 0, 0, 0, 1187, 1188, 6, 123, 25, 0, 1188, 263, 1, 0, 0, 0, 1189, 1190, 7, 12, 0, 0, 1190, 1191, 7, 2, 0, 0, 1191, 265, 1, 0, 0, 0, 1192, 1193, 3, 244, 114, 0, 1193, 1194, 1, 0, 0, 0, 1194, 1195, 6, 125, 26, 0, 1195, 267, 1, 0, 0, 0, 1196, 1197, 3, 66, 25, 0, 1197, 1198, 1, 0, 0, 0, 1198, 1199, 6, 126, 11, 0, 1199, 269, 1, 0, 0, 0, 1200, 1201, 3, 68, 26, 0, 1201, 1202, 1, 0, 0, 0, 1202, 1203, 6, 127, 11, 0, 1203, 271, 1, 0, 0, 0, 1204, 1205, 3, 70, 27, 0, 1205, 1206, 1, 0, 0, 0, 1206, 1207, 6, 128, 11, 0, 1207, 273, 1, 0, 0, 0, 1208, 1209, 3, 72, 28, 0, 1209, 1210, 1, 0, 0, 0, 1210, 1211, 6, 129, 16, 0, 1211, 1212, 6, 129, 12, 0, 1212, 275, 1, 0, 0, 0, 1213, 1214, 3, 176, 80, 0, 1214, 1215, 1, 0, 0, 0, 1215, 1216, 6, 130, 14, 0, 1216, 1217, 6, 130, 27, 0, 1217, 277, 1, 0, 0, 0, 1218, 1219, 7, 7, 0, 0, 1219, 1220, 7, 9, 0, 0, 1220, 1221, 1, 0, 0, 0, 1221, 1222, 6, 131, 28, 0, 1222, 279, 1, 0, 0, 0, 1223, 1224, 7, 19, 0, 0, 1224, 1225, 7, 1, 0, 0, 1225, 1226, 7, 5, 0, 0, 1226, 1227, 7, 10, 0, 0, 
1227, 1228, 1, 0, 0, 0, 1228, 1229, 6, 132, 28, 0, 1229, 281, 1, 0, 0, 0, 1230, 1231, 8, 35, 0, 0, 1231, 283, 1, 0, 0, 0, 1232, 1234, 3, 282, 133, 0, 1233, 1232, 1, 0, 0, 0, 1234, 1235, 1, 0, 0, 0, 1235, 1233, 1, 0, 0, 0, 1235, 1236, 1, 0, 0, 0, 1236, 1237, 1, 0, 0, 0, 1237, 1238, 3, 110, 47, 0, 1238, 1240, 1, 0, 0, 0, 1239, 1233, 1, 0, 0, 0, 1239, 1240, 1, 0, 0, 0, 1240, 1242, 1, 0, 0, 0, 1241, 1243, 3, 282, 133, 0, 1242, 1241, 1, 0, 0, 0, 1243, 1244, 1, 0, 0, 0, 1244, 1242, 1, 0, 0, 0, 1244, 1245, 1, 0, 0, 0, 1245, 285, 1, 0, 0, 0, 1246, 1247, 3, 284, 134, 0, 1247, 1248, 1, 0, 0, 0, 1248, 1249, 6, 135, 29, 0, 1249, 287, 1, 0, 0, 0, 1250, 1251, 3, 66, 25, 0, 1251, 1252, 1, 0, 0, 0, 1252, 1253, 6, 136, 11, 0, 1253, 289, 1, 0, 0, 0, 1254, 1255, 3, 68, 26, 0, 1255, 1256, 1, 0, 0, 0, 1256, 1257, 6, 137, 11, 0, 1257, 291, 1, 0, 0, 0, 1258, 1259, 3, 70, 27, 0, 1259, 1260, 1, 0, 0, 0, 1260, 1261, 6, 138, 11, 0, 1261, 293, 1, 0, 0, 0, 1262, 1263, 3, 72, 28, 0, 1263, 1264, 1, 0, 0, 0, 1264, 1265, 6, 139, 16, 0, 1265, 1266, 6, 139, 12, 0, 1266, 1267, 6, 139, 12, 0, 1267, 295, 1, 0, 0, 0, 1268, 1269, 3, 106, 45, 0, 1269, 1270, 1, 0, 0, 0, 1270, 1271, 6, 140, 20, 0, 1271, 297, 1, 0, 0, 0, 1272, 1273, 3, 112, 48, 0, 1273, 1274, 1, 0, 0, 0, 1274, 1275, 6, 141, 19, 0, 1275, 299, 1, 0, 0, 0, 1276, 1277, 3, 116, 50, 0, 1277, 1278, 1, 0, 0, 0, 1278, 1279, 6, 142, 23, 0, 1279, 301, 1, 0, 0, 0, 1280, 1281, 3, 280, 132, 0, 1281, 1282, 1, 0, 0, 0, 1282, 1283, 6, 143, 30, 0, 1283, 303, 1, 0, 0, 0, 1284, 1285, 3, 244, 114, 0, 1285, 1286, 1, 0, 0, 0, 1286, 1287, 6, 144, 26, 0, 1287, 305, 1, 0, 0, 0, 1288, 1289, 3, 184, 84, 0, 1289, 1290, 1, 0, 0, 0, 1290, 1291, 6, 145, 31, 0, 1291, 307, 1, 0, 0, 0, 1292, 1293, 4, 146, 12, 0, 1293, 1294, 3, 140, 62, 0, 1294, 1295, 1, 0, 0, 0, 1295, 1296, 6, 146, 24, 0, 1296, 309, 1, 0, 0, 0, 1297, 1298, 4, 147, 13, 0, 1298, 1299, 3, 174, 79, 0, 1299, 1300, 1, 0, 0, 0, 1300, 1301, 6, 147, 25, 0, 1301, 311, 1, 0, 0, 0, 1302, 1303, 3, 66, 25, 0, 1303, 1304, 1, 0, 0, 0, 1304, 1305, 6, 148, 11, 0, 1305, 313, 1, 0, 0, 0, 1306, 1307, 3, 68, 26, 0, 1307, 1308, 1, 0, 0, 0, 1308, 1309, 6, 149, 11, 0, 1309, 315, 1, 0, 0, 0, 1310, 1311, 3, 70, 27, 0, 1311, 1312, 1, 0, 0, 0, 1312, 1313, 6, 150, 11, 0, 1313, 317, 1, 0, 0, 0, 1314, 1315, 3, 72, 28, 0, 1315, 1316, 1, 0, 0, 0, 1316, 1317, 6, 151, 16, 0, 1317, 1318, 6, 151, 12, 0, 1318, 319, 1, 0, 0, 0, 1319, 1320, 3, 116, 50, 0, 1320, 1321, 1, 0, 0, 0, 1321, 1322, 6, 152, 23, 0, 1322, 321, 1, 0, 0, 0, 1323, 1324, 4, 153, 14, 0, 1324, 1325, 3, 140, 62, 0, 1325, 1326, 1, 0, 0, 0, 1326, 1327, 6, 153, 24, 0, 1327, 323, 1, 0, 0, 0, 1328, 1329, 4, 154, 15, 0, 1329, 1330, 3, 174, 79, 0, 1330, 1331, 1, 0, 0, 0, 1331, 1332, 6, 154, 25, 0, 1332, 325, 1, 0, 0, 0, 1333, 1334, 3, 184, 84, 0, 1334, 1335, 1, 0, 0, 0, 1335, 1336, 6, 155, 31, 0, 1336, 327, 1, 0, 0, 0, 1337, 1338, 3, 180, 82, 0, 1338, 1339, 1, 0, 0, 0, 1339, 1340, 6, 156, 32, 0, 1340, 329, 1, 0, 0, 0, 1341, 1342, 3, 66, 25, 0, 1342, 1343, 1, 0, 0, 0, 1343, 1344, 6, 157, 11, 0, 1344, 331, 1, 0, 0, 0, 1345, 1346, 3, 68, 26, 0, 1346, 1347, 1, 0, 0, 0, 1347, 1348, 6, 158, 11, 0, 1348, 333, 1, 0, 0, 0, 1349, 1350, 3, 70, 27, 0, 1350, 1351, 1, 0, 0, 0, 1351, 1352, 6, 159, 11, 0, 1352, 335, 1, 0, 0, 0, 1353, 1354, 3, 72, 28, 0, 1354, 1355, 1, 0, 0, 0, 1355, 1356, 6, 160, 16, 0, 1356, 1357, 6, 160, 12, 0, 1357, 337, 1, 0, 0, 0, 1358, 1359, 7, 1, 0, 0, 1359, 1360, 7, 9, 0, 0, 1360, 1361, 7, 15, 0, 0, 1361, 1362, 7, 7, 0, 0, 1362, 339, 1, 0, 0, 0, 1363, 1364, 3, 66, 25, 0, 1364, 1365, 1, 0, 0, 0, 1365, 1366, 
6, 162, 11, 0, 1366, 341, 1, 0, 0, 0, 1367, 1368, 3, 68, 26, 0, 1368, 1369, 1, 0, 0, 0, 1369, 1370, 6, 163, 11, 0, 1370, 343, 1, 0, 0, 0, 1371, 1372, 3, 70, 27, 0, 1372, 1373, 1, 0, 0, 0, 1373, 1374, 6, 164, 11, 0, 1374, 345, 1, 0, 0, 0, 1375, 1376, 3, 178, 81, 0, 1376, 1377, 1, 0, 0, 0, 1377, 1378, 6, 165, 17, 0, 1378, 1379, 6, 165, 12, 0, 1379, 347, 1, 0, 0, 0, 1380, 1381, 3, 110, 47, 0, 1381, 1382, 1, 0, 0, 0, 1382, 1383, 6, 166, 18, 0, 1383, 349, 1, 0, 0, 0, 1384, 1390, 3, 84, 34, 0, 1385, 1390, 3, 74, 29, 0, 1386, 1390, 3, 116, 50, 0, 1387, 1390, 3, 76, 30, 0, 1388, 1390, 3, 90, 37, 0, 1389, 1384, 1, 0, 0, 0, 1389, 1385, 1, 0, 0, 0, 1389, 1386, 1, 0, 0, 0, 1389, 1387, 1, 0, 0, 0, 1389, 1388, 1, 0, 0, 0, 1390, 1391, 1, 0, 0, 0, 1391, 1389, 1, 0, 0, 0, 1391, 1392, 1, 0, 0, 0, 1392, 351, 1, 0, 0, 0, 1393, 1394, 3, 66, 25, 0, 1394, 1395, 1, 0, 0, 0, 1395, 1396, 6, 168, 11, 0, 1396, 353, 1, 0, 0, 0, 1397, 1398, 3, 68, 26, 0, 1398, 1399, 1, 0, 0, 0, 1399, 1400, 6, 169, 11, 0, 1400, 355, 1, 0, 0, 0, 1401, 1402, 3, 70, 27, 0, 1402, 1403, 1, 0, 0, 0, 1403, 1404, 6, 170, 11, 0, 1404, 357, 1, 0, 0, 0, 1405, 1406, 3, 72, 28, 0, 1406, 1407, 1, 0, 0, 0, 1407, 1408, 6, 171, 16, 0, 1408, 1409, 6, 171, 12, 0, 1409, 359, 1, 0, 0, 0, 1410, 1411, 3, 110, 47, 0, 1411, 1412, 1, 0, 0, 0, 1412, 1413, 6, 172, 18, 0, 1413, 361, 1, 0, 0, 0, 1414, 1415, 3, 112, 48, 0, 1415, 1416, 1, 0, 0, 0, 1416, 1417, 6, 173, 19, 0, 1417, 363, 1, 0, 0, 0, 1418, 1419, 3, 116, 50, 0, 1419, 1420, 1, 0, 0, 0, 1420, 1421, 6, 174, 23, 0, 1421, 365, 1, 0, 0, 0, 1422, 1423, 3, 278, 131, 0, 1423, 1424, 1, 0, 0, 0, 1424, 1425, 6, 175, 33, 0, 1425, 1426, 6, 175, 34, 0, 1426, 367, 1, 0, 0, 0, 1427, 1428, 3, 218, 101, 0, 1428, 1429, 1, 0, 0, 0, 1429, 1430, 6, 176, 21, 0, 1430, 369, 1, 0, 0, 0, 1431, 1432, 3, 94, 39, 0, 1432, 1433, 1, 0, 0, 0, 1433, 1434, 6, 177, 22, 0, 1434, 371, 1, 0, 0, 0, 1435, 1436, 3, 66, 25, 0, 1436, 1437, 1, 0, 0, 0, 1437, 1438, 6, 178, 11, 0, 1438, 373, 1, 0, 0, 0, 1439, 1440, 3, 68, 26, 0, 1440, 1441, 1, 0, 0, 0, 1441, 1442, 6, 179, 11, 0, 1442, 375, 1, 0, 0, 0, 1443, 1444, 3, 70, 27, 0, 1444, 1445, 1, 0, 0, 0, 1445, 1446, 6, 180, 11, 0, 1446, 377, 1, 0, 0, 0, 1447, 1448, 3, 72, 28, 0, 1448, 1449, 1, 0, 0, 0, 1449, 1450, 6, 181, 16, 0, 1450, 1451, 6, 181, 12, 0, 1451, 1452, 6, 181, 12, 0, 1452, 379, 1, 0, 0, 0, 1453, 1454, 3, 112, 48, 0, 1454, 1455, 1, 0, 0, 0, 1455, 1456, 6, 182, 19, 0, 1456, 381, 1, 0, 0, 0, 1457, 1458, 3, 116, 50, 0, 1458, 1459, 1, 0, 0, 0, 1459, 1460, 6, 183, 23, 0, 1460, 383, 1, 0, 0, 0, 1461, 1462, 3, 244, 114, 0, 1462, 1463, 1, 0, 0, 0, 1463, 1464, 6, 184, 26, 0, 1464, 385, 1, 0, 0, 0, 1465, 1466, 3, 66, 25, 0, 1466, 1467, 1, 0, 0, 0, 1467, 1468, 6, 185, 11, 0, 1468, 387, 1, 0, 0, 0, 1469, 1470, 3, 68, 26, 0, 1470, 1471, 1, 0, 0, 0, 1471, 1472, 6, 186, 11, 0, 1472, 389, 1, 0, 0, 0, 1473, 1474, 3, 70, 27, 0, 1474, 1475, 1, 0, 0, 0, 1475, 1476, 6, 187, 11, 0, 1476, 391, 1, 0, 0, 0, 1477, 1478, 3, 72, 28, 0, 1478, 1479, 1, 0, 0, 0, 1479, 1480, 6, 188, 16, 0, 1480, 1481, 6, 188, 12, 0, 1481, 393, 1, 0, 0, 0, 1482, 1483, 3, 54, 19, 0, 1483, 1484, 1, 0, 0, 0, 1484, 1485, 6, 189, 35, 0, 1485, 395, 1, 0, 0, 0, 1486, 1487, 3, 264, 124, 0, 1487, 1488, 1, 0, 0, 0, 1488, 1489, 6, 190, 36, 0, 1489, 397, 1, 0, 0, 0, 1490, 1491, 3, 278, 131, 0, 1491, 1492, 1, 0, 0, 0, 1492, 1493, 6, 191, 33, 0, 1493, 1494, 6, 191, 12, 0, 1494, 1495, 6, 191, 0, 0, 1495, 399, 1, 0, 0, 0, 1496, 1497, 7, 20, 0, 0, 1497, 1498, 7, 2, 0, 0, 1498, 1499, 7, 1, 0, 0, 1499, 1500, 7, 9, 0, 0, 1500, 1501, 7, 17, 0, 0, 1501, 1502, 1, 
0, 0, 0, 1502, 1503, 6, 192, 12, 0, 1503, 1504, 6, 192, 0, 0, 1504, 401, 1, 0, 0, 0, 1505, 1506, 3, 180, 82, 0, 1506, 1507, 1, 0, 0, 0, 1507, 1508, 6, 193, 32, 0, 1508, 403, 1, 0, 0, 0, 1509, 1510, 3, 184, 84, 0, 1510, 1511, 1, 0, 0, 0, 1511, 1512, 6, 194, 31, 0, 1512, 405, 1, 0, 0, 0, 1513, 1514, 3, 66, 25, 0, 1514, 1515, 1, 0, 0, 0, 1515, 1516, 6, 195, 11, 0, 1516, 407, 1, 0, 0, 0, 1517, 1518, 3, 68, 26, 0, 1518, 1519, 1, 0, 0, 0, 1519, 1520, 6, 196, 11, 0, 1520, 409, 1, 0, 0, 0, 1521, 1522, 3, 70, 27, 0, 1522, 1523, 1, 0, 0, 0, 1523, 1524, 6, 197, 11, 0, 1524, 411, 1, 0, 0, 0, 1525, 1526, 3, 72, 28, 0, 1526, 1527, 1, 0, 0, 0, 1527, 1528, 6, 198, 16, 0, 1528, 1529, 6, 198, 12, 0, 1529, 413, 1, 0, 0, 0, 1530, 1531, 3, 218, 101, 0, 1531, 1532, 1, 0, 0, 0, 1532, 1533, 6, 199, 21, 0, 1533, 1534, 6, 199, 12, 0, 1534, 1535, 6, 199, 37, 0, 1535, 415, 1, 0, 0, 0, 1536, 1537, 3, 94, 39, 0, 1537, 1538, 1, 0, 0, 0, 1538, 1539, 6, 200, 22, 0, 1539, 1540, 6, 200, 12, 0, 1540, 1541, 6, 200, 37, 0, 1541, 417, 1, 0, 0, 0, 1542, 1543, 3, 66, 25, 0, 1543, 1544, 1, 0, 0, 0, 1544, 1545, 6, 201, 11, 0, 1545, 419, 1, 0, 0, 0, 1546, 1547, 3, 68, 26, 0, 1547, 1548, 1, 0, 0, 0, 1548, 1549, 6, 202, 11, 0, 1549, 421, 1, 0, 0, 0, 1550, 1551, 3, 70, 27, 0, 1551, 1552, 1, 0, 0, 0, 1552, 1553, 6, 203, 11, 0, 1553, 423, 1, 0, 0, 0, 1554, 1555, 3, 110, 47, 0, 1555, 1556, 1, 0, 0, 0, 1556, 1557, 6, 204, 18, 0, 1557, 1558, 6, 204, 12, 0, 1558, 1559, 6, 204, 9, 0, 1559, 425, 1, 0, 0, 0, 1560, 1561, 3, 112, 48, 0, 1561, 1562, 1, 0, 0, 0, 1562, 1563, 6, 205, 19, 0, 1563, 1564, 6, 205, 12, 0, 1564, 1565, 6, 205, 9, 0, 1565, 427, 1, 0, 0, 0, 1566, 1567, 3, 66, 25, 0, 1567, 1568, 1, 0, 0, 0, 1568, 1569, 6, 206, 11, 0, 1569, 429, 1, 0, 0, 0, 1570, 1571, 3, 68, 26, 0, 1571, 1572, 1, 0, 0, 0, 1572, 1573, 6, 207, 11, 0, 1573, 431, 1, 0, 0, 0, 1574, 1575, 3, 70, 27, 0, 1575, 1576, 1, 0, 0, 0, 1576, 1577, 6, 208, 11, 0, 1577, 433, 1, 0, 0, 0, 1578, 1579, 3, 184, 84, 0, 1579, 1580, 1, 0, 0, 0, 1580, 1581, 6, 209, 12, 0, 1581, 1582, 6, 209, 0, 0, 1582, 1583, 6, 209, 31, 0, 1583, 435, 1, 0, 0, 0, 1584, 1585, 3, 180, 82, 0, 1585, 1586, 1, 0, 0, 0, 1586, 1587, 6, 210, 12, 0, 1587, 1588, 6, 210, 0, 0, 1588, 1589, 6, 210, 32, 0, 1589, 437, 1, 0, 0, 0, 1590, 1591, 3, 100, 42, 0, 1591, 1592, 1, 0, 0, 0, 1592, 1593, 6, 211, 12, 0, 1593, 1594, 6, 211, 0, 0, 1594, 1595, 6, 211, 38, 0, 1595, 439, 1, 0, 0, 0, 1596, 1597, 3, 72, 28, 0, 1597, 1598, 1, 0, 0, 0, 1598, 1599, 6, 212, 16, 0, 1599, 1600, 6, 212, 12, 0, 1600, 441, 1, 0, 0, 0, 66, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 655, 665, 669, 672, 681, 683, 694, 713, 718, 727, 734, 739, 741, 752, 760, 763, 765, 770, 775, 781, 788, 793, 799, 802, 810, 814, 942, 947, 954, 956, 972, 977, 982, 984, 990, 1067, 1072, 1121, 1125, 1130, 1135, 1140, 1142, 1146, 1148, 1235, 1239, 1244, 1389, 1391, 39, 5, 1, 0, 5, 4, 0, 5, 6, 0, 5, 2, 0, 5, 3, 0, 5, 8, 0, 5, 5, 0, 5, 9, 0, 5, 11, 0, 5, 14, 0, 5, 13, 0, 0, 1, 0, 4, 0, 0, 7, 16, 0, 7, 70, 0, 5, 0, 0, 7, 29, 0, 7, 71, 0, 7, 38, 0, 7, 39, 0, 7, 36, 0, 7, 81, 0, 7, 30, 0, 7, 41, 0, 7, 53, 0, 7, 69, 0, 7, 85, 0, 5, 10, 0, 5, 7, 0, 7, 95, 0, 7, 94, 0, 7, 73, 0, 7, 72, 0, 7, 93, 0, 5, 12, 0, 7, 20, 0, 7, 89, 0, 5, 15, 0, 7, 33, 0] \ No newline at end of file diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.java index f10881fcf0692..f04582e820e28 100644 --- 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer.java @@ -8,16 +8,14 @@ * 2.0. */ +import org.antlr.v4.runtime.Lexer; import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.RuleContext; -import org.antlr.v4.runtime.RuntimeMetaData; -import org.antlr.v4.runtime.Vocabulary; -import org.antlr.v4.runtime.VocabularyImpl; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.atn.LexerATNSimulator; -import org.antlr.v4.runtime.atn.PredictionContextCache; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.*; +import org.antlr.v4.runtime.atn.*; import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.misc.*; @SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast", "CheckReturnValue", "this-escape"}) public class EsqlBaseLexer extends LexerConfig { @@ -27,90 +25,96 @@ public class EsqlBaseLexer extends LexerConfig { protected static final PredictionContextCache _sharedContextCache = new PredictionContextCache(); public static final int - DISSECT=1, DROP=2, ENRICH=3, EVAL=4, EXPLAIN=5, FROM=6, GROK=7, KEEP=8, - LIMIT=9, MV_EXPAND=10, RENAME=11, ROW=12, SHOW=13, SORT=14, STATS=15, - WHERE=16, DEV_INLINESTATS=17, DEV_LOOKUP=18, DEV_METRICS=19, UNKNOWN_CMD=20, - LINE_COMMENT=21, MULTILINE_COMMENT=22, WS=23, COLON=24, PIPE=25, QUOTED_STRING=26, - INTEGER_LITERAL=27, DECIMAL_LITERAL=28, BY=29, AND=30, ASC=31, ASSIGN=32, - CAST_OP=33, COMMA=34, DESC=35, DOT=36, FALSE=37, FIRST=38, IN=39, IS=40, - LAST=41, LIKE=42, LP=43, NOT=44, NULL=45, NULLS=46, OR=47, PARAM=48, RLIKE=49, - RP=50, TRUE=51, EQ=52, CIEQ=53, NEQ=54, LT=55, LTE=56, GT=57, GTE=58, - PLUS=59, MINUS=60, ASTERISK=61, SLASH=62, PERCENT=63, NAMED_OR_POSITIONAL_PARAM=64, - OPENING_BRACKET=65, CLOSING_BRACKET=66, UNQUOTED_IDENTIFIER=67, QUOTED_IDENTIFIER=68, - EXPR_LINE_COMMENT=69, EXPR_MULTILINE_COMMENT=70, EXPR_WS=71, EXPLAIN_WS=72, - EXPLAIN_LINE_COMMENT=73, EXPLAIN_MULTILINE_COMMENT=74, METADATA=75, UNQUOTED_SOURCE=76, - FROM_LINE_COMMENT=77, FROM_MULTILINE_COMMENT=78, FROM_WS=79, ID_PATTERN=80, - PROJECT_LINE_COMMENT=81, PROJECT_MULTILINE_COMMENT=82, PROJECT_WS=83, - AS=84, RENAME_LINE_COMMENT=85, RENAME_MULTILINE_COMMENT=86, RENAME_WS=87, - ON=88, WITH=89, ENRICH_POLICY_NAME=90, ENRICH_LINE_COMMENT=91, ENRICH_MULTILINE_COMMENT=92, - ENRICH_WS=93, ENRICH_FIELD_LINE_COMMENT=94, ENRICH_FIELD_MULTILINE_COMMENT=95, - ENRICH_FIELD_WS=96, MVEXPAND_LINE_COMMENT=97, MVEXPAND_MULTILINE_COMMENT=98, - MVEXPAND_WS=99, INFO=100, SHOW_LINE_COMMENT=101, SHOW_MULTILINE_COMMENT=102, - SHOW_WS=103, SETTING=104, SETTING_LINE_COMMENT=105, SETTTING_MULTILINE_COMMENT=106, - SETTING_WS=107, LOOKUP_LINE_COMMENT=108, LOOKUP_MULTILINE_COMMENT=109, - LOOKUP_WS=110, LOOKUP_FIELD_LINE_COMMENT=111, LOOKUP_FIELD_MULTILINE_COMMENT=112, - LOOKUP_FIELD_WS=113, METRICS_LINE_COMMENT=114, METRICS_MULTILINE_COMMENT=115, - METRICS_WS=116, CLOSING_METRICS_LINE_COMMENT=117, CLOSING_METRICS_MULTILINE_COMMENT=118, - CLOSING_METRICS_WS=119; + DISSECT=1, DROP=2, ENRICH=3, EVAL=4, EXPLAIN=5, FROM=6, GROK=7, KEEP=8, + LIMIT=9, MV_EXPAND=10, RENAME=11, ROW=12, SHOW=13, SORT=14, STATS=15, + WHERE=16, DEV_INLINESTATS=17, DEV_LOOKUP=18, DEV_METRICS=19, DEV_JOIN=20, + DEV_JOIN_FULL=21, DEV_JOIN_LEFT=22, DEV_JOIN_RIGHT=23, DEV_JOIN_LOOKUP=24, + UNKNOWN_CMD=25, LINE_COMMENT=26, MULTILINE_COMMENT=27, WS=28, PIPE=29, + 
QUOTED_STRING=30, INTEGER_LITERAL=31, DECIMAL_LITERAL=32, BY=33, AND=34, + ASC=35, ASSIGN=36, CAST_OP=37, COLON=38, COMMA=39, DESC=40, DOT=41, FALSE=42, + FIRST=43, IN=44, IS=45, LAST=46, LIKE=47, LP=48, NOT=49, NULL=50, NULLS=51, + OR=52, PARAM=53, RLIKE=54, RP=55, TRUE=56, EQ=57, CIEQ=58, NEQ=59, LT=60, + LTE=61, GT=62, GTE=63, PLUS=64, MINUS=65, ASTERISK=66, SLASH=67, PERCENT=68, + NAMED_OR_POSITIONAL_PARAM=69, OPENING_BRACKET=70, CLOSING_BRACKET=71, + UNQUOTED_IDENTIFIER=72, QUOTED_IDENTIFIER=73, EXPR_LINE_COMMENT=74, EXPR_MULTILINE_COMMENT=75, + EXPR_WS=76, EXPLAIN_WS=77, EXPLAIN_LINE_COMMENT=78, EXPLAIN_MULTILINE_COMMENT=79, + METADATA=80, UNQUOTED_SOURCE=81, FROM_LINE_COMMENT=82, FROM_MULTILINE_COMMENT=83, + FROM_WS=84, ID_PATTERN=85, PROJECT_LINE_COMMENT=86, PROJECT_MULTILINE_COMMENT=87, + PROJECT_WS=88, AS=89, RENAME_LINE_COMMENT=90, RENAME_MULTILINE_COMMENT=91, + RENAME_WS=92, ON=93, WITH=94, ENRICH_POLICY_NAME=95, ENRICH_LINE_COMMENT=96, + ENRICH_MULTILINE_COMMENT=97, ENRICH_WS=98, ENRICH_FIELD_LINE_COMMENT=99, + ENRICH_FIELD_MULTILINE_COMMENT=100, ENRICH_FIELD_WS=101, MVEXPAND_LINE_COMMENT=102, + MVEXPAND_MULTILINE_COMMENT=103, MVEXPAND_WS=104, INFO=105, SHOW_LINE_COMMENT=106, + SHOW_MULTILINE_COMMENT=107, SHOW_WS=108, SETTING=109, SETTING_LINE_COMMENT=110, + SETTTING_MULTILINE_COMMENT=111, SETTING_WS=112, LOOKUP_LINE_COMMENT=113, + LOOKUP_MULTILINE_COMMENT=114, LOOKUP_WS=115, LOOKUP_FIELD_LINE_COMMENT=116, + LOOKUP_FIELD_MULTILINE_COMMENT=117, LOOKUP_FIELD_WS=118, USING=119, JOIN_LINE_COMMENT=120, + JOIN_MULTILINE_COMMENT=121, JOIN_WS=122, METRICS_LINE_COMMENT=123, METRICS_MULTILINE_COMMENT=124, + METRICS_WS=125, CLOSING_METRICS_LINE_COMMENT=126, CLOSING_METRICS_MULTILINE_COMMENT=127, + CLOSING_METRICS_WS=128; public static final int - EXPRESSION_MODE=1, EXPLAIN_MODE=2, FROM_MODE=3, PROJECT_MODE=4, RENAME_MODE=5, - ENRICH_MODE=6, ENRICH_FIELD_MODE=7, MVEXPAND_MODE=8, SHOW_MODE=9, SETTING_MODE=10, - LOOKUP_MODE=11, LOOKUP_FIELD_MODE=12, METRICS_MODE=13, CLOSING_METRICS_MODE=14; + EXPRESSION_MODE=1, EXPLAIN_MODE=2, FROM_MODE=3, PROJECT_MODE=4, RENAME_MODE=5, + ENRICH_MODE=6, ENRICH_FIELD_MODE=7, MVEXPAND_MODE=8, SHOW_MODE=9, SETTING_MODE=10, + LOOKUP_MODE=11, LOOKUP_FIELD_MODE=12, JOIN_MODE=13, METRICS_MODE=14, CLOSING_METRICS_MODE=15; public static String[] channelNames = { "DEFAULT_TOKEN_CHANNEL", "HIDDEN" }; public static String[] modeNames = { - "DEFAULT_MODE", "EXPRESSION_MODE", "EXPLAIN_MODE", "FROM_MODE", "PROJECT_MODE", - "RENAME_MODE", "ENRICH_MODE", "ENRICH_FIELD_MODE", "MVEXPAND_MODE", "SHOW_MODE", - "SETTING_MODE", "LOOKUP_MODE", "LOOKUP_FIELD_MODE", "METRICS_MODE", "CLOSING_METRICS_MODE" + "DEFAULT_MODE", "EXPRESSION_MODE", "EXPLAIN_MODE", "FROM_MODE", "PROJECT_MODE", + "RENAME_MODE", "ENRICH_MODE", "ENRICH_FIELD_MODE", "MVEXPAND_MODE", "SHOW_MODE", + "SETTING_MODE", "LOOKUP_MODE", "LOOKUP_FIELD_MODE", "JOIN_MODE", "METRICS_MODE", + "CLOSING_METRICS_MODE" }; private static String[] makeRuleNames() { return new String[] { - "DISSECT", "DROP", "ENRICH", "EVAL", "EXPLAIN", "FROM", "GROK", "KEEP", - "LIMIT", "MV_EXPAND", "RENAME", "ROW", "SHOW", "SORT", "STATS", "WHERE", - "DEV_INLINESTATS", "DEV_LOOKUP", "DEV_METRICS", "UNKNOWN_CMD", "LINE_COMMENT", - "MULTILINE_COMMENT", "WS", "COLON", "PIPE", "DIGIT", "LETTER", "ESCAPE_SEQUENCE", - "UNESCAPED_CHARS", "EXPONENT", "ASPERAND", "BACKQUOTE", "BACKQUOTE_BLOCK", - "UNDERSCORE", "UNQUOTED_ID_BODY", "QUOTED_STRING", "INTEGER_LITERAL", - "DECIMAL_LITERAL", "BY", "AND", "ASC", "ASSIGN", "CAST_OP", "COMMA", - "DESC", "DOT", 
"FALSE", "FIRST", "IN", "IS", "LAST", "LIKE", "LP", "NOT", - "NULL", "NULLS", "OR", "PARAM", "RLIKE", "RP", "TRUE", "EQ", "CIEQ", - "NEQ", "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK", "SLASH", - "PERCENT", "EXPRESSION_COLON", "NESTED_WHERE", "NAMED_OR_POSITIONAL_PARAM", - "OPENING_BRACKET", "CLOSING_BRACKET", "UNQUOTED_IDENTIFIER", "QUOTED_ID", - "QUOTED_IDENTIFIER", "EXPR_LINE_COMMENT", "EXPR_MULTILINE_COMMENT", "EXPR_WS", - "EXPLAIN_OPENING_BRACKET", "EXPLAIN_PIPE", "EXPLAIN_WS", "EXPLAIN_LINE_COMMENT", - "EXPLAIN_MULTILINE_COMMENT", "FROM_PIPE", "FROM_OPENING_BRACKET", "FROM_CLOSING_BRACKET", - "FROM_COLON", "FROM_COMMA", "FROM_ASSIGN", "METADATA", "UNQUOTED_SOURCE_PART", - "UNQUOTED_SOURCE", "FROM_UNQUOTED_SOURCE", "FROM_QUOTED_SOURCE", "FROM_LINE_COMMENT", - "FROM_MULTILINE_COMMENT", "FROM_WS", "PROJECT_PIPE", "PROJECT_DOT", "PROJECT_COMMA", - "PROJECT_PARAM", "PROJECT_NAMED_OR_POSITIONAL_PARAM", "UNQUOTED_ID_BODY_WITH_PATTERN", - "UNQUOTED_ID_PATTERN", "ID_PATTERN", "PROJECT_LINE_COMMENT", "PROJECT_MULTILINE_COMMENT", - "PROJECT_WS", "RENAME_PIPE", "RENAME_ASSIGN", "RENAME_COMMA", "RENAME_DOT", - "RENAME_PARAM", "RENAME_NAMED_OR_POSITIONAL_PARAM", "AS", "RENAME_ID_PATTERN", - "RENAME_LINE_COMMENT", "RENAME_MULTILINE_COMMENT", "RENAME_WS", "ENRICH_PIPE", - "ENRICH_OPENING_BRACKET", "ON", "WITH", "ENRICH_POLICY_NAME_BODY", "ENRICH_POLICY_NAME", - "ENRICH_MODE_UNQUOTED_VALUE", "ENRICH_LINE_COMMENT", "ENRICH_MULTILINE_COMMENT", - "ENRICH_WS", "ENRICH_FIELD_PIPE", "ENRICH_FIELD_ASSIGN", "ENRICH_FIELD_COMMA", - "ENRICH_FIELD_DOT", "ENRICH_FIELD_WITH", "ENRICH_FIELD_ID_PATTERN", "ENRICH_FIELD_QUOTED_IDENTIFIER", - "ENRICH_FIELD_PARAM", "ENRICH_FIELD_NAMED_OR_POSITIONAL_PARAM", "ENRICH_FIELD_LINE_COMMENT", - "ENRICH_FIELD_MULTILINE_COMMENT", "ENRICH_FIELD_WS", "MVEXPAND_PIPE", - "MVEXPAND_DOT", "MVEXPAND_PARAM", "MVEXPAND_NAMED_OR_POSITIONAL_PARAM", - "MVEXPAND_QUOTED_IDENTIFIER", "MVEXPAND_UNQUOTED_IDENTIFIER", "MVEXPAND_LINE_COMMENT", - "MVEXPAND_MULTILINE_COMMENT", "MVEXPAND_WS", "SHOW_PIPE", "INFO", "SHOW_LINE_COMMENT", - "SHOW_MULTILINE_COMMENT", "SHOW_WS", "SETTING_CLOSING_BRACKET", "SETTING_COLON", - "SETTING", "SETTING_LINE_COMMENT", "SETTTING_MULTILINE_COMMENT", "SETTING_WS", - "LOOKUP_PIPE", "LOOKUP_COLON", "LOOKUP_COMMA", "LOOKUP_DOT", "LOOKUP_ON", - "LOOKUP_UNQUOTED_SOURCE", "LOOKUP_QUOTED_SOURCE", "LOOKUP_LINE_COMMENT", - "LOOKUP_MULTILINE_COMMENT", "LOOKUP_WS", "LOOKUP_FIELD_PIPE", "LOOKUP_FIELD_COMMA", - "LOOKUP_FIELD_DOT", "LOOKUP_FIELD_ID_PATTERN", "LOOKUP_FIELD_LINE_COMMENT", - "LOOKUP_FIELD_MULTILINE_COMMENT", "LOOKUP_FIELD_WS", "METRICS_PIPE", - "METRICS_UNQUOTED_SOURCE", "METRICS_QUOTED_SOURCE", "METRICS_LINE_COMMENT", - "METRICS_MULTILINE_COMMENT", "METRICS_WS", "CLOSING_METRICS_COLON", "CLOSING_METRICS_COMMA", - "CLOSING_METRICS_LINE_COMMENT", "CLOSING_METRICS_MULTILINE_COMMENT", - "CLOSING_METRICS_WS", "CLOSING_METRICS_QUOTED_IDENTIFIER", "CLOSING_METRICS_UNQUOTED_IDENTIFIER", + "DISSECT", "DROP", "ENRICH", "EVAL", "EXPLAIN", "FROM", "GROK", "KEEP", + "LIMIT", "MV_EXPAND", "RENAME", "ROW", "SHOW", "SORT", "STATS", "WHERE", + "DEV_INLINESTATS", "DEV_LOOKUP", "DEV_METRICS", "DEV_JOIN", "DEV_JOIN_FULL", + "DEV_JOIN_LEFT", "DEV_JOIN_RIGHT", "DEV_JOIN_LOOKUP", "UNKNOWN_CMD", + "LINE_COMMENT", "MULTILINE_COMMENT", "WS", "PIPE", "DIGIT", "LETTER", + "ESCAPE_SEQUENCE", "UNESCAPED_CHARS", "EXPONENT", "ASPERAND", "BACKQUOTE", + "BACKQUOTE_BLOCK", "UNDERSCORE", "UNQUOTED_ID_BODY", "QUOTED_STRING", + "INTEGER_LITERAL", "DECIMAL_LITERAL", "BY", "AND", "ASC", "ASSIGN", 
"CAST_OP", + "COLON", "COMMA", "DESC", "DOT", "FALSE", "FIRST", "IN", "IS", "LAST", + "LIKE", "LP", "NOT", "NULL", "NULLS", "OR", "PARAM", "RLIKE", "RP", "TRUE", + "EQ", "CIEQ", "NEQ", "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK", + "SLASH", "PERCENT", "NESTED_WHERE", "NAMED_OR_POSITIONAL_PARAM", "OPENING_BRACKET", + "CLOSING_BRACKET", "UNQUOTED_IDENTIFIER", "QUOTED_ID", "QUOTED_IDENTIFIER", + "EXPR_LINE_COMMENT", "EXPR_MULTILINE_COMMENT", "EXPR_WS", "EXPLAIN_OPENING_BRACKET", + "EXPLAIN_PIPE", "EXPLAIN_WS", "EXPLAIN_LINE_COMMENT", "EXPLAIN_MULTILINE_COMMENT", + "FROM_PIPE", "FROM_OPENING_BRACKET", "FROM_CLOSING_BRACKET", "FROM_COLON", + "FROM_COMMA", "FROM_ASSIGN", "METADATA", "UNQUOTED_SOURCE_PART", "UNQUOTED_SOURCE", + "FROM_UNQUOTED_SOURCE", "FROM_QUOTED_SOURCE", "FROM_LINE_COMMENT", "FROM_MULTILINE_COMMENT", + "FROM_WS", "PROJECT_PIPE", "PROJECT_DOT", "PROJECT_COMMA", "PROJECT_PARAM", + "PROJECT_NAMED_OR_POSITIONAL_PARAM", "UNQUOTED_ID_BODY_WITH_PATTERN", + "UNQUOTED_ID_PATTERN", "ID_PATTERN", "PROJECT_LINE_COMMENT", "PROJECT_MULTILINE_COMMENT", + "PROJECT_WS", "RENAME_PIPE", "RENAME_ASSIGN", "RENAME_COMMA", "RENAME_DOT", + "RENAME_PARAM", "RENAME_NAMED_OR_POSITIONAL_PARAM", "AS", "RENAME_ID_PATTERN", + "RENAME_LINE_COMMENT", "RENAME_MULTILINE_COMMENT", "RENAME_WS", "ENRICH_PIPE", + "ENRICH_OPENING_BRACKET", "ON", "WITH", "ENRICH_POLICY_NAME_BODY", "ENRICH_POLICY_NAME", + "ENRICH_MODE_UNQUOTED_VALUE", "ENRICH_LINE_COMMENT", "ENRICH_MULTILINE_COMMENT", + "ENRICH_WS", "ENRICH_FIELD_PIPE", "ENRICH_FIELD_ASSIGN", "ENRICH_FIELD_COMMA", + "ENRICH_FIELD_DOT", "ENRICH_FIELD_WITH", "ENRICH_FIELD_ID_PATTERN", "ENRICH_FIELD_QUOTED_IDENTIFIER", + "ENRICH_FIELD_PARAM", "ENRICH_FIELD_NAMED_OR_POSITIONAL_PARAM", "ENRICH_FIELD_LINE_COMMENT", + "ENRICH_FIELD_MULTILINE_COMMENT", "ENRICH_FIELD_WS", "MVEXPAND_PIPE", + "MVEXPAND_DOT", "MVEXPAND_PARAM", "MVEXPAND_NAMED_OR_POSITIONAL_PARAM", + "MVEXPAND_QUOTED_IDENTIFIER", "MVEXPAND_UNQUOTED_IDENTIFIER", "MVEXPAND_LINE_COMMENT", + "MVEXPAND_MULTILINE_COMMENT", "MVEXPAND_WS", "SHOW_PIPE", "INFO", "SHOW_LINE_COMMENT", + "SHOW_MULTILINE_COMMENT", "SHOW_WS", "SETTING_CLOSING_BRACKET", "SETTING_COLON", + "SETTING", "SETTING_LINE_COMMENT", "SETTTING_MULTILINE_COMMENT", "SETTING_WS", + "LOOKUP_PIPE", "LOOKUP_COLON", "LOOKUP_COMMA", "LOOKUP_DOT", "LOOKUP_ON", + "LOOKUP_UNQUOTED_SOURCE", "LOOKUP_QUOTED_SOURCE", "LOOKUP_LINE_COMMENT", + "LOOKUP_MULTILINE_COMMENT", "LOOKUP_WS", "LOOKUP_FIELD_PIPE", "LOOKUP_FIELD_COMMA", + "LOOKUP_FIELD_DOT", "LOOKUP_FIELD_ID_PATTERN", "LOOKUP_FIELD_LINE_COMMENT", + "LOOKUP_FIELD_MULTILINE_COMMENT", "LOOKUP_FIELD_WS", "JOIN_PIPE", "JOIN_JOIN", + "JOIN_AS", "JOIN_ON", "USING", "JOIN_UNQUOTED_IDENTIFER", "JOIN_QUOTED_IDENTIFIER", + "JOIN_LINE_COMMENT", "JOIN_MULTILINE_COMMENT", "JOIN_WS", "METRICS_PIPE", + "METRICS_UNQUOTED_SOURCE", "METRICS_QUOTED_SOURCE", "METRICS_LINE_COMMENT", + "METRICS_MULTILINE_COMMENT", "METRICS_WS", "CLOSING_METRICS_COLON", "CLOSING_METRICS_COMMA", + "CLOSING_METRICS_LINE_COMMENT", "CLOSING_METRICS_MULTILINE_COMMENT", + "CLOSING_METRICS_WS", "CLOSING_METRICS_QUOTED_IDENTIFIER", "CLOSING_METRICS_UNQUOTED_IDENTIFIER", "CLOSING_METRICS_BY", "CLOSING_METRICS_PIPE" }; } @@ -118,45 +122,49 @@ private static String[] makeRuleNames() { private static String[] makeLiteralNames() { return new String[] { - null, "'dissect'", "'drop'", "'enrich'", "'eval'", "'explain'", "'from'", - "'grok'", "'keep'", "'limit'", "'mv_expand'", "'rename'", "'row'", "'show'", - "'sort'", "'stats'", "'where'", null, null, null, null, 
null, null, null, - "':'", "'|'", null, null, null, "'by'", "'and'", "'asc'", "'='", "'::'", - "','", "'desc'", "'.'", "'false'", "'first'", "'in'", "'is'", "'last'", - "'like'", "'('", "'not'", "'null'", "'nulls'", "'or'", "'?'", "'rlike'", - "')'", "'true'", "'=='", "'=~'", "'!='", "'<'", "'<='", "'>'", "'>='", - "'+'", "'-'", "'*'", "'/'", "'%'", null, null, "']'", null, null, null, - null, null, null, null, null, "'metadata'", null, null, null, null, null, - null, null, null, "'as'", null, null, null, "'on'", "'with'", null, null, - null, null, null, null, null, null, null, null, "'info'" + null, "'dissect'", "'drop'", "'enrich'", "'eval'", "'explain'", "'from'", + "'grok'", "'keep'", "'limit'", "'mv_expand'", "'rename'", "'row'", "'show'", + "'sort'", "'stats'", "'where'", null, null, null, null, null, null, null, + null, null, null, null, null, "'|'", null, null, null, "'by'", "'and'", + "'asc'", "'='", "'::'", "':'", "','", "'desc'", "'.'", "'false'", "'first'", + "'in'", "'is'", "'last'", "'like'", "'('", "'not'", "'null'", "'nulls'", + "'or'", "'?'", "'rlike'", "')'", "'true'", "'=='", "'=~'", "'!='", "'<'", + "'<='", "'>'", "'>='", "'+'", "'-'", "'*'", "'/'", "'%'", null, null, + "']'", null, null, null, null, null, null, null, null, "'metadata'", + null, null, null, null, null, null, null, null, "'as'", null, null, null, + "'on'", "'with'", null, null, null, null, null, null, null, null, null, + null, "'info'", null, null, null, null, null, null, null, null, null, + null, null, null, null, "'USING'" }; } private static final String[] _LITERAL_NAMES = makeLiteralNames(); private static String[] makeSymbolicNames() { return new String[] { - null, "DISSECT", "DROP", "ENRICH", "EVAL", "EXPLAIN", "FROM", "GROK", - "KEEP", "LIMIT", "MV_EXPAND", "RENAME", "ROW", "SHOW", "SORT", "STATS", - "WHERE", "DEV_INLINESTATS", "DEV_LOOKUP", "DEV_METRICS", "UNKNOWN_CMD", - "LINE_COMMENT", "MULTILINE_COMMENT", "WS", "COLON", "PIPE", "QUOTED_STRING", - "INTEGER_LITERAL", "DECIMAL_LITERAL", "BY", "AND", "ASC", "ASSIGN", "CAST_OP", - "COMMA", "DESC", "DOT", "FALSE", "FIRST", "IN", "IS", "LAST", "LIKE", - "LP", "NOT", "NULL", "NULLS", "OR", "PARAM", "RLIKE", "RP", "TRUE", "EQ", - "CIEQ", "NEQ", "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK", - "SLASH", "PERCENT", "NAMED_OR_POSITIONAL_PARAM", "OPENING_BRACKET", "CLOSING_BRACKET", - "UNQUOTED_IDENTIFIER", "QUOTED_IDENTIFIER", "EXPR_LINE_COMMENT", "EXPR_MULTILINE_COMMENT", - "EXPR_WS", "EXPLAIN_WS", "EXPLAIN_LINE_COMMENT", "EXPLAIN_MULTILINE_COMMENT", - "METADATA", "UNQUOTED_SOURCE", "FROM_LINE_COMMENT", "FROM_MULTILINE_COMMENT", - "FROM_WS", "ID_PATTERN", "PROJECT_LINE_COMMENT", "PROJECT_MULTILINE_COMMENT", - "PROJECT_WS", "AS", "RENAME_LINE_COMMENT", "RENAME_MULTILINE_COMMENT", - "RENAME_WS", "ON", "WITH", "ENRICH_POLICY_NAME", "ENRICH_LINE_COMMENT", - "ENRICH_MULTILINE_COMMENT", "ENRICH_WS", "ENRICH_FIELD_LINE_COMMENT", - "ENRICH_FIELD_MULTILINE_COMMENT", "ENRICH_FIELD_WS", "MVEXPAND_LINE_COMMENT", - "MVEXPAND_MULTILINE_COMMENT", "MVEXPAND_WS", "INFO", "SHOW_LINE_COMMENT", - "SHOW_MULTILINE_COMMENT", "SHOW_WS", "SETTING", "SETTING_LINE_COMMENT", - "SETTTING_MULTILINE_COMMENT", "SETTING_WS", "LOOKUP_LINE_COMMENT", "LOOKUP_MULTILINE_COMMENT", - "LOOKUP_WS", "LOOKUP_FIELD_LINE_COMMENT", "LOOKUP_FIELD_MULTILINE_COMMENT", - "LOOKUP_FIELD_WS", "METRICS_LINE_COMMENT", "METRICS_MULTILINE_COMMENT", - "METRICS_WS", "CLOSING_METRICS_LINE_COMMENT", "CLOSING_METRICS_MULTILINE_COMMENT", + null, "DISSECT", "DROP", "ENRICH", "EVAL", "EXPLAIN", "FROM", "GROK", + 
"KEEP", "LIMIT", "MV_EXPAND", "RENAME", "ROW", "SHOW", "SORT", "STATS", + "WHERE", "DEV_INLINESTATS", "DEV_LOOKUP", "DEV_METRICS", "DEV_JOIN", + "DEV_JOIN_FULL", "DEV_JOIN_LEFT", "DEV_JOIN_RIGHT", "DEV_JOIN_LOOKUP", + "UNKNOWN_CMD", "LINE_COMMENT", "MULTILINE_COMMENT", "WS", "PIPE", "QUOTED_STRING", + "INTEGER_LITERAL", "DECIMAL_LITERAL", "BY", "AND", "ASC", "ASSIGN", "CAST_OP", + "COLON", "COMMA", "DESC", "DOT", "FALSE", "FIRST", "IN", "IS", "LAST", + "LIKE", "LP", "NOT", "NULL", "NULLS", "OR", "PARAM", "RLIKE", "RP", "TRUE", + "EQ", "CIEQ", "NEQ", "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK", + "SLASH", "PERCENT", "NAMED_OR_POSITIONAL_PARAM", "OPENING_BRACKET", "CLOSING_BRACKET", + "UNQUOTED_IDENTIFIER", "QUOTED_IDENTIFIER", "EXPR_LINE_COMMENT", "EXPR_MULTILINE_COMMENT", + "EXPR_WS", "EXPLAIN_WS", "EXPLAIN_LINE_COMMENT", "EXPLAIN_MULTILINE_COMMENT", + "METADATA", "UNQUOTED_SOURCE", "FROM_LINE_COMMENT", "FROM_MULTILINE_COMMENT", + "FROM_WS", "ID_PATTERN", "PROJECT_LINE_COMMENT", "PROJECT_MULTILINE_COMMENT", + "PROJECT_WS", "AS", "RENAME_LINE_COMMENT", "RENAME_MULTILINE_COMMENT", + "RENAME_WS", "ON", "WITH", "ENRICH_POLICY_NAME", "ENRICH_LINE_COMMENT", + "ENRICH_MULTILINE_COMMENT", "ENRICH_WS", "ENRICH_FIELD_LINE_COMMENT", + "ENRICH_FIELD_MULTILINE_COMMENT", "ENRICH_FIELD_WS", "MVEXPAND_LINE_COMMENT", + "MVEXPAND_MULTILINE_COMMENT", "MVEXPAND_WS", "INFO", "SHOW_LINE_COMMENT", + "SHOW_MULTILINE_COMMENT", "SHOW_WS", "SETTING", "SETTING_LINE_COMMENT", + "SETTTING_MULTILINE_COMMENT", "SETTING_WS", "LOOKUP_LINE_COMMENT", "LOOKUP_MULTILINE_COMMENT", + "LOOKUP_WS", "LOOKUP_FIELD_LINE_COMMENT", "LOOKUP_FIELD_MULTILINE_COMMENT", + "LOOKUP_FIELD_WS", "USING", "JOIN_LINE_COMMENT", "JOIN_MULTILINE_COMMENT", + "JOIN_WS", "METRICS_LINE_COMMENT", "METRICS_MULTILINE_COMMENT", "METRICS_WS", + "CLOSING_METRICS_LINE_COMMENT", "CLOSING_METRICS_MULTILINE_COMMENT", "CLOSING_METRICS_WS" }; } @@ -228,23 +236,31 @@ public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) { return DEV_LOOKUP_sempred((RuleContext)_localctx, predIndex); case 18: return DEV_METRICS_sempred((RuleContext)_localctx, predIndex); - case 73: - return EXPRESSION_COLON_sempred((RuleContext)_localctx, predIndex); - case 106: + case 19: + return DEV_JOIN_sempred((RuleContext)_localctx, predIndex); + case 20: + return DEV_JOIN_FULL_sempred((RuleContext)_localctx, predIndex); + case 21: + return DEV_JOIN_LEFT_sempred((RuleContext)_localctx, predIndex); + case 22: + return DEV_JOIN_RIGHT_sempred((RuleContext)_localctx, predIndex); + case 23: + return DEV_JOIN_LOOKUP_sempred((RuleContext)_localctx, predIndex); + case 110: return PROJECT_PARAM_sempred((RuleContext)_localctx, predIndex); - case 107: + case 111: return PROJECT_NAMED_OR_POSITIONAL_PARAM_sempred((RuleContext)_localctx, predIndex); - case 118: + case 122: return RENAME_PARAM_sempred((RuleContext)_localctx, predIndex); - case 119: + case 123: return RENAME_NAMED_OR_POSITIONAL_PARAM_sempred((RuleContext)_localctx, predIndex); - case 142: + case 146: return ENRICH_FIELD_PARAM_sempred((RuleContext)_localctx, predIndex); - case 143: + case 147: return ENRICH_FIELD_NAMED_OR_POSITIONAL_PARAM_sempred((RuleContext)_localctx, predIndex); - case 149: + case 153: return MVEXPAND_PARAM_sempred((RuleContext)_localctx, predIndex); - case 150: + case 154: return MVEXPAND_NAMED_OR_POSITIONAL_PARAM_sempred((RuleContext)_localctx, predIndex); } return true; @@ -270,1034 +286,1142 @@ private boolean DEV_METRICS_sempred(RuleContext _localctx, int predIndex) { } return true; } - 
private boolean EXPRESSION_COLON_sempred(RuleContext _localctx, int predIndex) { + private boolean DEV_JOIN_sempred(RuleContext _localctx, int predIndex) { switch (predIndex) { case 3: return this.isDevVersion(); } return true; } - private boolean PROJECT_PARAM_sempred(RuleContext _localctx, int predIndex) { + private boolean DEV_JOIN_FULL_sempred(RuleContext _localctx, int predIndex) { switch (predIndex) { case 4: return this.isDevVersion(); } return true; } - private boolean PROJECT_NAMED_OR_POSITIONAL_PARAM_sempred(RuleContext _localctx, int predIndex) { + private boolean DEV_JOIN_LEFT_sempred(RuleContext _localctx, int predIndex) { switch (predIndex) { case 5: return this.isDevVersion(); } return true; } - private boolean RENAME_PARAM_sempred(RuleContext _localctx, int predIndex) { + private boolean DEV_JOIN_RIGHT_sempred(RuleContext _localctx, int predIndex) { switch (predIndex) { case 6: return this.isDevVersion(); } return true; } - private boolean RENAME_NAMED_OR_POSITIONAL_PARAM_sempred(RuleContext _localctx, int predIndex) { + private boolean DEV_JOIN_LOOKUP_sempred(RuleContext _localctx, int predIndex) { switch (predIndex) { case 7: return this.isDevVersion(); } return true; } - private boolean ENRICH_FIELD_PARAM_sempred(RuleContext _localctx, int predIndex) { + private boolean PROJECT_PARAM_sempred(RuleContext _localctx, int predIndex) { switch (predIndex) { case 8: return this.isDevVersion(); } return true; } - private boolean ENRICH_FIELD_NAMED_OR_POSITIONAL_PARAM_sempred(RuleContext _localctx, int predIndex) { + private boolean PROJECT_NAMED_OR_POSITIONAL_PARAM_sempred(RuleContext _localctx, int predIndex) { switch (predIndex) { case 9: return this.isDevVersion(); } return true; } - private boolean MVEXPAND_PARAM_sempred(RuleContext _localctx, int predIndex) { + private boolean RENAME_PARAM_sempred(RuleContext _localctx, int predIndex) { switch (predIndex) { case 10: return this.isDevVersion(); } return true; } - private boolean MVEXPAND_NAMED_OR_POSITIONAL_PARAM_sempred(RuleContext _localctx, int predIndex) { + private boolean RENAME_NAMED_OR_POSITIONAL_PARAM_sempred(RuleContext _localctx, int predIndex) { switch (predIndex) { case 11: return this.isDevVersion(); } return true; } + private boolean ENRICH_FIELD_PARAM_sempred(RuleContext _localctx, int predIndex) { + switch (predIndex) { + case 12: + return this.isDevVersion(); + } + return true; + } + private boolean ENRICH_FIELD_NAMED_OR_POSITIONAL_PARAM_sempred(RuleContext _localctx, int predIndex) { + switch (predIndex) { + case 13: + return this.isDevVersion(); + } + return true; + } + private boolean MVEXPAND_PARAM_sempred(RuleContext _localctx, int predIndex) { + switch (predIndex) { + case 14: + return this.isDevVersion(); + } + return true; + } + private boolean MVEXPAND_NAMED_OR_POSITIONAL_PARAM_sempred(RuleContext _localctx, int predIndex) { + switch (predIndex) { + case 15: + return this.isDevVersion(); + } + return true; + } public static final String _serializedATN = - "\u0004\u0000w\u05cc\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ - "\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ - "\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ - "\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff"+ - "\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001\u0002\u0002\u0007\u0002"+ - "\u0002\u0003\u0007\u0003\u0002\u0004\u0007\u0004\u0002\u0005\u0007\u0005"+ - "\u0002\u0006\u0007\u0006\u0002\u0007\u0007\u0007\u0002\b\u0007\b\u0002"+ - 
"\t\u0007\t\u0002\n\u0007\n\u0002\u000b\u0007\u000b\u0002\f\u0007\f\u0002"+ - "\r\u0007\r\u0002\u000e\u0007\u000e\u0002\u000f\u0007\u000f\u0002\u0010"+ - "\u0007\u0010\u0002\u0011\u0007\u0011\u0002\u0012\u0007\u0012\u0002\u0013"+ - "\u0007\u0013\u0002\u0014\u0007\u0014\u0002\u0015\u0007\u0015\u0002\u0016"+ - "\u0007\u0016\u0002\u0017\u0007\u0017\u0002\u0018\u0007\u0018\u0002\u0019"+ - "\u0007\u0019\u0002\u001a\u0007\u001a\u0002\u001b\u0007\u001b\u0002\u001c"+ - "\u0007\u001c\u0002\u001d\u0007\u001d\u0002\u001e\u0007\u001e\u0002\u001f"+ - "\u0007\u001f\u0002 \u0007 \u0002!\u0007!\u0002\"\u0007\"\u0002#\u0007"+ - "#\u0002$\u0007$\u0002%\u0007%\u0002&\u0007&\u0002\'\u0007\'\u0002(\u0007"+ - "(\u0002)\u0007)\u0002*\u0007*\u0002+\u0007+\u0002,\u0007,\u0002-\u0007"+ - "-\u0002.\u0007.\u0002/\u0007/\u00020\u00070\u00021\u00071\u00022\u0007"+ - "2\u00023\u00073\u00024\u00074\u00025\u00075\u00026\u00076\u00027\u0007"+ - "7\u00028\u00078\u00029\u00079\u0002:\u0007:\u0002;\u0007;\u0002<\u0007"+ - "<\u0002=\u0007=\u0002>\u0007>\u0002?\u0007?\u0002@\u0007@\u0002A\u0007"+ - "A\u0002B\u0007B\u0002C\u0007C\u0002D\u0007D\u0002E\u0007E\u0002F\u0007"+ - "F\u0002G\u0007G\u0002H\u0007H\u0002I\u0007I\u0002J\u0007J\u0002K\u0007"+ - "K\u0002L\u0007L\u0002M\u0007M\u0002N\u0007N\u0002O\u0007O\u0002P\u0007"+ - "P\u0002Q\u0007Q\u0002R\u0007R\u0002S\u0007S\u0002T\u0007T\u0002U\u0007"+ - "U\u0002V\u0007V\u0002W\u0007W\u0002X\u0007X\u0002Y\u0007Y\u0002Z\u0007"+ - "Z\u0002[\u0007[\u0002\\\u0007\\\u0002]\u0007]\u0002^\u0007^\u0002_\u0007"+ - "_\u0002`\u0007`\u0002a\u0007a\u0002b\u0007b\u0002c\u0007c\u0002d\u0007"+ - "d\u0002e\u0007e\u0002f\u0007f\u0002g\u0007g\u0002h\u0007h\u0002i\u0007"+ - "i\u0002j\u0007j\u0002k\u0007k\u0002l\u0007l\u0002m\u0007m\u0002n\u0007"+ - "n\u0002o\u0007o\u0002p\u0007p\u0002q\u0007q\u0002r\u0007r\u0002s\u0007"+ - "s\u0002t\u0007t\u0002u\u0007u\u0002v\u0007v\u0002w\u0007w\u0002x\u0007"+ - "x\u0002y\u0007y\u0002z\u0007z\u0002{\u0007{\u0002|\u0007|\u0002}\u0007"+ - "}\u0002~\u0007~\u0002\u007f\u0007\u007f\u0002\u0080\u0007\u0080\u0002"+ - "\u0081\u0007\u0081\u0002\u0082\u0007\u0082\u0002\u0083\u0007\u0083\u0002"+ - "\u0084\u0007\u0084\u0002\u0085\u0007\u0085\u0002\u0086\u0007\u0086\u0002"+ - "\u0087\u0007\u0087\u0002\u0088\u0007\u0088\u0002\u0089\u0007\u0089\u0002"+ - "\u008a\u0007\u008a\u0002\u008b\u0007\u008b\u0002\u008c\u0007\u008c\u0002"+ - "\u008d\u0007\u008d\u0002\u008e\u0007\u008e\u0002\u008f\u0007\u008f\u0002"+ - "\u0090\u0007\u0090\u0002\u0091\u0007\u0091\u0002\u0092\u0007\u0092\u0002"+ - "\u0093\u0007\u0093\u0002\u0094\u0007\u0094\u0002\u0095\u0007\u0095\u0002"+ - "\u0096\u0007\u0096\u0002\u0097\u0007\u0097\u0002\u0098\u0007\u0098\u0002"+ - "\u0099\u0007\u0099\u0002\u009a\u0007\u009a\u0002\u009b\u0007\u009b\u0002"+ - "\u009c\u0007\u009c\u0002\u009d\u0007\u009d\u0002\u009e\u0007\u009e\u0002"+ - "\u009f\u0007\u009f\u0002\u00a0\u0007\u00a0\u0002\u00a1\u0007\u00a1\u0002"+ - "\u00a2\u0007\u00a2\u0002\u00a3\u0007\u00a3\u0002\u00a4\u0007\u00a4\u0002"+ - "\u00a5\u0007\u00a5\u0002\u00a6\u0007\u00a6\u0002\u00a7\u0007\u00a7\u0002"+ - "\u00a8\u0007\u00a8\u0002\u00a9\u0007\u00a9\u0002\u00aa\u0007\u00aa\u0002"+ - "\u00ab\u0007\u00ab\u0002\u00ac\u0007\u00ac\u0002\u00ad\u0007\u00ad\u0002"+ - "\u00ae\u0007\u00ae\u0002\u00af\u0007\u00af\u0002\u00b0\u0007\u00b0\u0002"+ - "\u00b1\u0007\u00b1\u0002\u00b2\u0007\u00b2\u0002\u00b3\u0007\u00b3\u0002"+ - "\u00b4\u0007\u00b4\u0002\u00b5\u0007\u00b5\u0002\u00b6\u0007\u00b6\u0002"+ - "\u00b7\u0007\u00b7\u0002\u00b8\u0007\u00b8\u0002\u00b9\u0007\u00b9\u0002"+ - 
"\u00ba\u0007\u00ba\u0002\u00bb\u0007\u00bb\u0002\u00bc\u0007\u00bc\u0002"+ - "\u00bd\u0007\u00bd\u0002\u00be\u0007\u00be\u0002\u00bf\u0007\u00bf\u0002"+ - "\u00c0\u0007\u00c0\u0002\u00c1\u0007\u00c1\u0002\u00c2\u0007\u00c2\u0002"+ - "\u00c3\u0007\u00c3\u0002\u00c4\u0007\u00c4\u0002\u00c5\u0007\u00c5\u0002"+ - "\u00c6\u0007\u00c6\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0000\u0001"+ - "\u0000\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0000\u0001"+ + "\u0004\u0000\u0080\u0641\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff"+ + "\uffff\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff"+ + "\uffff\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff"+ + "\uffff\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff\uffff\u0006\uffff"+ + "\uffff\u0006\uffff\uffff\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001"+ + "\u0002\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004\u0007\u0004"+ + "\u0002\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007\u0007\u0007"+ + "\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b\u0007\u000b"+ + "\u0002\f\u0007\f\u0002\r\u0007\r\u0002\u000e\u0007\u000e\u0002\u000f\u0007"+ + "\u000f\u0002\u0010\u0007\u0010\u0002\u0011\u0007\u0011\u0002\u0012\u0007"+ + "\u0012\u0002\u0013\u0007\u0013\u0002\u0014\u0007\u0014\u0002\u0015\u0007"+ + "\u0015\u0002\u0016\u0007\u0016\u0002\u0017\u0007\u0017\u0002\u0018\u0007"+ + "\u0018\u0002\u0019\u0007\u0019\u0002\u001a\u0007\u001a\u0002\u001b\u0007"+ + "\u001b\u0002\u001c\u0007\u001c\u0002\u001d\u0007\u001d\u0002\u001e\u0007"+ + "\u001e\u0002\u001f\u0007\u001f\u0002 \u0007 \u0002!\u0007!\u0002\"\u0007"+ + "\"\u0002#\u0007#\u0002$\u0007$\u0002%\u0007%\u0002&\u0007&\u0002\'\u0007"+ + "\'\u0002(\u0007(\u0002)\u0007)\u0002*\u0007*\u0002+\u0007+\u0002,\u0007"+ + ",\u0002-\u0007-\u0002.\u0007.\u0002/\u0007/\u00020\u00070\u00021\u0007"+ + "1\u00022\u00072\u00023\u00073\u00024\u00074\u00025\u00075\u00026\u0007"+ + "6\u00027\u00077\u00028\u00078\u00029\u00079\u0002:\u0007:\u0002;\u0007"+ + ";\u0002<\u0007<\u0002=\u0007=\u0002>\u0007>\u0002?\u0007?\u0002@\u0007"+ + "@\u0002A\u0007A\u0002B\u0007B\u0002C\u0007C\u0002D\u0007D\u0002E\u0007"+ + "E\u0002F\u0007F\u0002G\u0007G\u0002H\u0007H\u0002I\u0007I\u0002J\u0007"+ + "J\u0002K\u0007K\u0002L\u0007L\u0002M\u0007M\u0002N\u0007N\u0002O\u0007"+ + "O\u0002P\u0007P\u0002Q\u0007Q\u0002R\u0007R\u0002S\u0007S\u0002T\u0007"+ + "T\u0002U\u0007U\u0002V\u0007V\u0002W\u0007W\u0002X\u0007X\u0002Y\u0007"+ + "Y\u0002Z\u0007Z\u0002[\u0007[\u0002\\\u0007\\\u0002]\u0007]\u0002^\u0007"+ + "^\u0002_\u0007_\u0002`\u0007`\u0002a\u0007a\u0002b\u0007b\u0002c\u0007"+ + "c\u0002d\u0007d\u0002e\u0007e\u0002f\u0007f\u0002g\u0007g\u0002h\u0007"+ + "h\u0002i\u0007i\u0002j\u0007j\u0002k\u0007k\u0002l\u0007l\u0002m\u0007"+ + "m\u0002n\u0007n\u0002o\u0007o\u0002p\u0007p\u0002q\u0007q\u0002r\u0007"+ + "r\u0002s\u0007s\u0002t\u0007t\u0002u\u0007u\u0002v\u0007v\u0002w\u0007"+ + "w\u0002x\u0007x\u0002y\u0007y\u0002z\u0007z\u0002{\u0007{\u0002|\u0007"+ + "|\u0002}\u0007}\u0002~\u0007~\u0002\u007f\u0007\u007f\u0002\u0080\u0007"+ + "\u0080\u0002\u0081\u0007\u0081\u0002\u0082\u0007\u0082\u0002\u0083\u0007"+ + "\u0083\u0002\u0084\u0007\u0084\u0002\u0085\u0007\u0085\u0002\u0086\u0007"+ + "\u0086\u0002\u0087\u0007\u0087\u0002\u0088\u0007\u0088\u0002\u0089\u0007"+ + "\u0089\u0002\u008a\u0007\u008a\u0002\u008b\u0007\u008b\u0002\u008c\u0007"+ + "\u008c\u0002\u008d\u0007\u008d\u0002\u008e\u0007\u008e\u0002\u008f\u0007"+ + 
"\u008f\u0002\u0090\u0007\u0090\u0002\u0091\u0007\u0091\u0002\u0092\u0007"+ + "\u0092\u0002\u0093\u0007\u0093\u0002\u0094\u0007\u0094\u0002\u0095\u0007"+ + "\u0095\u0002\u0096\u0007\u0096\u0002\u0097\u0007\u0097\u0002\u0098\u0007"+ + "\u0098\u0002\u0099\u0007\u0099\u0002\u009a\u0007\u009a\u0002\u009b\u0007"+ + "\u009b\u0002\u009c\u0007\u009c\u0002\u009d\u0007\u009d\u0002\u009e\u0007"+ + "\u009e\u0002\u009f\u0007\u009f\u0002\u00a0\u0007\u00a0\u0002\u00a1\u0007"+ + "\u00a1\u0002\u00a2\u0007\u00a2\u0002\u00a3\u0007\u00a3\u0002\u00a4\u0007"+ + "\u00a4\u0002\u00a5\u0007\u00a5\u0002\u00a6\u0007\u00a6\u0002\u00a7\u0007"+ + "\u00a7\u0002\u00a8\u0007\u00a8\u0002\u00a9\u0007\u00a9\u0002\u00aa\u0007"+ + "\u00aa\u0002\u00ab\u0007\u00ab\u0002\u00ac\u0007\u00ac\u0002\u00ad\u0007"+ + "\u00ad\u0002\u00ae\u0007\u00ae\u0002\u00af\u0007\u00af\u0002\u00b0\u0007"+ + "\u00b0\u0002\u00b1\u0007\u00b1\u0002\u00b2\u0007\u00b2\u0002\u00b3\u0007"+ + "\u00b3\u0002\u00b4\u0007\u00b4\u0002\u00b5\u0007\u00b5\u0002\u00b6\u0007"+ + "\u00b6\u0002\u00b7\u0007\u00b7\u0002\u00b8\u0007\u00b8\u0002\u00b9\u0007"+ + "\u00b9\u0002\u00ba\u0007\u00ba\u0002\u00bb\u0007\u00bb\u0002\u00bc\u0007"+ + "\u00bc\u0002\u00bd\u0007\u00bd\u0002\u00be\u0007\u00be\u0002\u00bf\u0007"+ + "\u00bf\u0002\u00c0\u0007\u00c0\u0002\u00c1\u0007\u00c1\u0002\u00c2\u0007"+ + "\u00c2\u0002\u00c3\u0007\u00c3\u0002\u00c4\u0007\u00c4\u0002\u00c5\u0007"+ + "\u00c5\u0002\u00c6\u0007\u00c6\u0002\u00c7\u0007\u00c7\u0002\u00c8\u0007"+ + "\u00c8\u0002\u00c9\u0007\u00c9\u0002\u00ca\u0007\u00ca\u0002\u00cb\u0007"+ + "\u00cb\u0002\u00cc\u0007\u00cc\u0002\u00cd\u0007\u00cd\u0002\u00ce\u0007"+ + "\u00ce\u0002\u00cf\u0007\u00cf\u0002\u00d0\u0007\u00d0\u0002\u00d1\u0007"+ + "\u00d1\u0002\u00d2\u0007\u00d2\u0002\u00d3\u0007\u00d3\u0002\u00d4\u0007"+ + "\u00d4\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0000\u0001"+ + "\u0000\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0001\u0001"+ "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ - "\u0001\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001"+ - "\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0003\u0001\u0003\u0001"+ - "\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0004\u0001"+ + "\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001"+ + "\u0002\u0001\u0002\u0001\u0002\u0001\u0003\u0001\u0003\u0001\u0003\u0001"+ + "\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0004\u0001\u0004\u0001"+ "\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001"+ - "\u0004\u0001\u0004\u0001\u0004\u0001\u0005\u0001\u0005\u0001\u0005\u0001"+ - "\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0006\u0001\u0006\u0001"+ - "\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0007\u0001"+ - "\u0007\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007\u0001"+ - "\b\u0001\b\u0001\b\u0001\b\u0001\b\u0001\b\u0001\b\u0001\b\u0001\t\u0001"+ + "\u0004\u0001\u0004\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001"+ + "\u0005\u0001\u0005\u0001\u0005\u0001\u0006\u0001\u0006\u0001\u0006\u0001"+ + "\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0007\u0001\u0007\u0001"+ + "\u0007\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007\u0001\b\u0001\b"+ + "\u0001\b\u0001\b\u0001\b\u0001\b\u0001\b\u0001\b\u0001\t\u0001\t\u0001"+ "\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001"+ - "\t\u0001\t\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001"+ - 
"\n\u0001\n\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b"+ - "\u0001\u000b\u0001\f\u0001\f\u0001\f\u0001\f\u0001\f\u0001\f\u0001\f\u0001"+ - "\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001\u000e\u0001\u000e"+ - "\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000e"+ - "\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f"+ - "\u0001\u000f\u0001\u000f\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010"+ - "\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010"+ - "\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0011"+ - "\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0011"+ - "\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0012\u0001\u0012\u0001\u0012"+ - "\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012"+ - "\u0001\u0012\u0001\u0012\u0001\u0013\u0004\u0013\u0244\b\u0013\u000b\u0013"+ - "\f\u0013\u0245\u0001\u0013\u0001\u0013\u0001\u0014\u0001\u0014\u0001\u0014"+ - "\u0001\u0014\u0005\u0014\u024e\b\u0014\n\u0014\f\u0014\u0251\t\u0014\u0001"+ - "\u0014\u0003\u0014\u0254\b\u0014\u0001\u0014\u0003\u0014\u0257\b\u0014"+ - "\u0001\u0014\u0001\u0014\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015"+ - "\u0001\u0015\u0005\u0015\u0260\b\u0015\n\u0015\f\u0015\u0263\t\u0015\u0001"+ - "\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0016\u0004"+ - "\u0016\u026b\b\u0016\u000b\u0016\f\u0016\u026c\u0001\u0016\u0001\u0016"+ - "\u0001\u0017\u0001\u0017\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018"+ - "\u0001\u0019\u0001\u0019\u0001\u001a\u0001\u001a\u0001\u001b\u0001\u001b"+ - "\u0001\u001b\u0001\u001c\u0001\u001c\u0001\u001d\u0001\u001d\u0003\u001d"+ - "\u0282\b\u001d\u0001\u001d\u0004\u001d\u0285\b\u001d\u000b\u001d\f\u001d"+ - "\u0286\u0001\u001e\u0001\u001e\u0001\u001f\u0001\u001f\u0001 \u0001 \u0001"+ - " \u0003 \u0290\b \u0001!\u0001!\u0001\"\u0001\"\u0001\"\u0003\"\u0297"+ - "\b\"\u0001#\u0001#\u0001#\u0005#\u029c\b#\n#\f#\u029f\t#\u0001#\u0001"+ - "#\u0001#\u0001#\u0001#\u0001#\u0005#\u02a7\b#\n#\f#\u02aa\t#\u0001#\u0001"+ - "#\u0001#\u0001#\u0001#\u0003#\u02b1\b#\u0001#\u0003#\u02b4\b#\u0003#\u02b6"+ - "\b#\u0001$\u0004$\u02b9\b$\u000b$\f$\u02ba\u0001%\u0004%\u02be\b%\u000b"+ - "%\f%\u02bf\u0001%\u0001%\u0005%\u02c4\b%\n%\f%\u02c7\t%\u0001%\u0001%"+ - "\u0004%\u02cb\b%\u000b%\f%\u02cc\u0001%\u0004%\u02d0\b%\u000b%\f%\u02d1"+ - "\u0001%\u0001%\u0005%\u02d6\b%\n%\f%\u02d9\t%\u0003%\u02db\b%\u0001%\u0001"+ - "%\u0001%\u0001%\u0004%\u02e1\b%\u000b%\f%\u02e2\u0001%\u0001%\u0003%\u02e7"+ - "\b%\u0001&\u0001&\u0001&\u0001\'\u0001\'\u0001\'\u0001\'\u0001(\u0001"+ - "(\u0001(\u0001(\u0001)\u0001)\u0001*\u0001*\u0001*\u0001+\u0001+\u0001"+ - ",\u0001,\u0001,\u0001,\u0001,\u0001-\u0001-\u0001.\u0001.\u0001.\u0001"+ - ".\u0001.\u0001.\u0001/\u0001/\u0001/\u0001/\u0001/\u0001/\u00010\u0001"+ - "0\u00010\u00011\u00011\u00011\u00012\u00012\u00012\u00012\u00012\u0001"+ - "3\u00013\u00013\u00013\u00013\u00014\u00014\u00015\u00015\u00015\u0001"+ - "5\u00016\u00016\u00016\u00016\u00016\u00017\u00017\u00017\u00017\u0001"+ - "7\u00017\u00018\u00018\u00018\u00019\u00019\u0001:\u0001:\u0001:\u0001"+ - ":\u0001:\u0001:\u0001;\u0001;\u0001<\u0001<\u0001<\u0001<\u0001<\u0001"+ - "=\u0001=\u0001=\u0001>\u0001>\u0001>\u0001?\u0001?\u0001?\u0001@\u0001"+ - "@\u0001A\u0001A\u0001A\u0001B\u0001B\u0001C\u0001C\u0001C\u0001D\u0001"+ - "D\u0001E\u0001E\u0001F\u0001F\u0001G\u0001G\u0001H\u0001H\u0001I\u0001"+ - 
"I\u0001I\u0001I\u0001I\u0001J\u0001J\u0001J\u0001J\u0001K\u0001K\u0001"+ - "K\u0003K\u036a\bK\u0001K\u0005K\u036d\bK\nK\fK\u0370\tK\u0001K\u0001K"+ - "\u0004K\u0374\bK\u000bK\fK\u0375\u0003K\u0378\bK\u0001L\u0001L\u0001L"+ - "\u0001L\u0001L\u0001M\u0001M\u0001M\u0001M\u0001M\u0001N\u0001N\u0005"+ - "N\u0386\bN\nN\fN\u0389\tN\u0001N\u0001N\u0003N\u038d\bN\u0001N\u0004N"+ - "\u0390\bN\u000bN\fN\u0391\u0003N\u0394\bN\u0001O\u0001O\u0004O\u0398\b"+ - "O\u000bO\fO\u0399\u0001O\u0001O\u0001P\u0001P\u0001Q\u0001Q\u0001Q\u0001"+ - "Q\u0001R\u0001R\u0001R\u0001R\u0001S\u0001S\u0001S\u0001S\u0001T\u0001"+ - "T\u0001T\u0001T\u0001T\u0001U\u0001U\u0001U\u0001U\u0001U\u0001V\u0001"+ - "V\u0001V\u0001V\u0001W\u0001W\u0001W\u0001W\u0001X\u0001X\u0001X\u0001"+ - "X\u0001Y\u0001Y\u0001Y\u0001Y\u0001Y\u0001Z\u0001Z\u0001Z\u0001Z\u0001"+ - "[\u0001[\u0001[\u0001[\u0001\\\u0001\\\u0001\\\u0001\\\u0001]\u0001]\u0001"+ - "]\u0001]\u0001^\u0001^\u0001^\u0001^\u0001_\u0001_\u0001_\u0001_\u0001"+ - "_\u0001_\u0001_\u0001_\u0001_\u0001`\u0001`\u0001`\u0003`\u03e7\b`\u0001"+ - "a\u0004a\u03ea\ba\u000ba\fa\u03eb\u0001b\u0001b\u0001b\u0001b\u0001c\u0001"+ - "c\u0001c\u0001c\u0001d\u0001d\u0001d\u0001d\u0001e\u0001e\u0001e\u0001"+ - "e\u0001f\u0001f\u0001f\u0001f\u0001g\u0001g\u0001g\u0001g\u0001g\u0001"+ - "h\u0001h\u0001h\u0001h\u0001i\u0001i\u0001i\u0001i\u0001j\u0001j\u0001"+ - "j\u0001j\u0001j\u0001k\u0001k\u0001k\u0001k\u0001k\u0001l\u0001l\u0001"+ - "l\u0001l\u0003l\u041d\bl\u0001m\u0001m\u0003m\u0421\bm\u0001m\u0005m\u0424"+ - "\bm\nm\fm\u0427\tm\u0001m\u0001m\u0003m\u042b\bm\u0001m\u0004m\u042e\b"+ - "m\u000bm\fm\u042f\u0003m\u0432\bm\u0001n\u0001n\u0004n\u0436\bn\u000b"+ - "n\fn\u0437\u0001o\u0001o\u0001o\u0001o\u0001p\u0001p\u0001p\u0001p\u0001"+ - "q\u0001q\u0001q\u0001q\u0001r\u0001r\u0001r\u0001r\u0001r\u0001s\u0001"+ - "s\u0001s\u0001s\u0001t\u0001t\u0001t\u0001t\u0001u\u0001u\u0001u\u0001"+ - "u\u0001v\u0001v\u0001v\u0001v\u0001v\u0001w\u0001w\u0001w\u0001w\u0001"+ - "w\u0001x\u0001x\u0001x\u0001y\u0001y\u0001y\u0001y\u0001z\u0001z\u0001"+ - "z\u0001z\u0001{\u0001{\u0001{\u0001{\u0001|\u0001|\u0001|\u0001|\u0001"+ - "}\u0001}\u0001}\u0001}\u0001}\u0001~\u0001~\u0001~\u0001~\u0001~\u0001"+ - "\u007f\u0001\u007f\u0001\u007f\u0001\u007f\u0001\u007f\u0001\u0080\u0001"+ - "\u0080\u0001\u0080\u0001\u0080\u0001\u0080\u0001\u0080\u0001\u0080\u0001"+ - "\u0081\u0001\u0081\u0001\u0082\u0004\u0082\u048d\b\u0082\u000b\u0082\f"+ - "\u0082\u048e\u0001\u0082\u0001\u0082\u0003\u0082\u0493\b\u0082\u0001\u0082"+ - "\u0004\u0082\u0496\b\u0082\u000b\u0082\f\u0082\u0497\u0001\u0083\u0001"+ - "\u0083\u0001\u0083\u0001\u0083\u0001\u0084\u0001\u0084\u0001\u0084\u0001"+ - "\u0084\u0001\u0085\u0001\u0085\u0001\u0085\u0001\u0085\u0001\u0086\u0001"+ - "\u0086\u0001\u0086\u0001\u0086\u0001\u0087\u0001\u0087\u0001\u0087\u0001"+ - "\u0087\u0001\u0087\u0001\u0087\u0001\u0088\u0001\u0088\u0001\u0088\u0001"+ - "\u0088\u0001\u0089\u0001\u0089\u0001\u0089\u0001\u0089\u0001\u008a\u0001"+ - "\u008a\u0001\u008a\u0001\u008a\u0001\u008b\u0001\u008b\u0001\u008b\u0001"+ - "\u008b\u0001\u008c\u0001\u008c\u0001\u008c\u0001\u008c\u0001\u008d\u0001"+ - "\u008d\u0001\u008d\u0001\u008d\u0001\u008e\u0001\u008e\u0001\u008e\u0001"+ - "\u008e\u0001\u008e\u0001\u008f\u0001\u008f\u0001\u008f\u0001\u008f\u0001"+ - "\u008f\u0001\u0090\u0001\u0090\u0001\u0090\u0001\u0090\u0001\u0091\u0001"+ - "\u0091\u0001\u0091\u0001\u0091\u0001\u0092\u0001\u0092\u0001\u0092\u0001"+ - "\u0092\u0001\u0093\u0001\u0093\u0001\u0093\u0001\u0093\u0001\u0093\u0001"+ - 
"\u0094\u0001\u0094\u0001\u0094\u0001\u0094\u0001\u0095\u0001\u0095\u0001"+ - "\u0095\u0001\u0095\u0001\u0095\u0001\u0096\u0001\u0096\u0001\u0096\u0001"+ - "\u0096\u0001\u0096\u0001\u0097\u0001\u0097\u0001\u0097\u0001\u0097\u0001"+ - "\u0098\u0001\u0098\u0001\u0098\u0001\u0098\u0001\u0099\u0001\u0099\u0001"+ - "\u0099\u0001\u0099\u0001\u009a\u0001\u009a\u0001\u009a\u0001\u009a\u0001"+ - "\u009b\u0001\u009b\u0001\u009b\u0001\u009b\u0001\u009c\u0001\u009c\u0001"+ - "\u009c\u0001\u009c\u0001\u009c\u0001\u009d\u0001\u009d\u0001\u009d\u0001"+ - "\u009d\u0001\u009d\u0001\u009e\u0001\u009e\u0001\u009e\u0001\u009e\u0001"+ - "\u009f\u0001\u009f\u0001\u009f\u0001\u009f\u0001\u00a0\u0001\u00a0\u0001"+ - "\u00a0\u0001\u00a0\u0001\u00a1\u0001\u00a1\u0001\u00a1\u0001\u00a1\u0001"+ - "\u00a1\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a3\u0001"+ - "\u00a3\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0004\u00a3\u0529\b\u00a3\u000b"+ - "\u00a3\f\u00a3\u052a\u0001\u00a4\u0001\u00a4\u0001\u00a4\u0001\u00a4\u0001"+ + "\t\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001"+ + "\n\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b"+ + "\u0001\f\u0001\f\u0001\f\u0001\f\u0001\f\u0001\f\u0001\f\u0001\r\u0001"+ + "\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001\u000e\u0001\u000e\u0001"+ + "\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0001"+ + "\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001"+ + "\u000f\u0001\u000f\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001"+ + "\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001"+ + "\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0011\u0001"+ + "\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001"+ + "\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0012\u0001"+ + "\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001"+ + "\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0013\u0001\u0013\u0001"+ + "\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001"+ + "\u0014\u0001\u0014\u0001\u0014\u0001\u0014\u0001\u0014\u0001\u0014\u0001"+ + "\u0014\u0001\u0014\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001"+ + "\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0016\u0001\u0016\u0001"+ + "\u0016\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0016\u0001"+ + "\u0016\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001"+ + "\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0018\u0004"+ + "\u0018\u028e\b\u0018\u000b\u0018\f\u0018\u028f\u0001\u0018\u0001\u0018"+ + "\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0005\u0019\u0298\b\u0019"+ + "\n\u0019\f\u0019\u029b\t\u0019\u0001\u0019\u0003\u0019\u029e\b\u0019\u0001"+ + "\u0019\u0003\u0019\u02a1\b\u0019\u0001\u0019\u0001\u0019\u0001\u001a\u0001"+ + "\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0005\u001a\u02aa\b\u001a\n"+ + "\u001a\f\u001a\u02ad\t\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001"+ + "\u001a\u0001\u001a\u0001\u001b\u0004\u001b\u02b5\b\u001b\u000b\u001b\f"+ + "\u001b\u02b6\u0001\u001b\u0001\u001b\u0001\u001c\u0001\u001c\u0001\u001c"+ + "\u0001\u001c\u0001\u001d\u0001\u001d\u0001\u001e\u0001\u001e\u0001\u001f"+ + "\u0001\u001f\u0001\u001f\u0001 \u0001 \u0001!\u0001!\u0003!\u02ca\b!\u0001"+ + "!\u0004!\u02cd\b!\u000b!\f!\u02ce\u0001\"\u0001\"\u0001#\u0001#\u0001"+ + "$\u0001$\u0001$\u0003$\u02d8\b$\u0001%\u0001%\u0001&\u0001&\u0001&\u0003"+ + 
"&\u02df\b&\u0001\'\u0001\'\u0001\'\u0005\'\u02e4\b\'\n\'\f\'\u02e7\t\'"+ + "\u0001\'\u0001\'\u0001\'\u0001\'\u0001\'\u0001\'\u0005\'\u02ef\b\'\n\'"+ + "\f\'\u02f2\t\'\u0001\'\u0001\'\u0001\'\u0001\'\u0001\'\u0003\'\u02f9\b"+ + "\'\u0001\'\u0003\'\u02fc\b\'\u0003\'\u02fe\b\'\u0001(\u0004(\u0301\b("+ + "\u000b(\f(\u0302\u0001)\u0004)\u0306\b)\u000b)\f)\u0307\u0001)\u0001)"+ + "\u0005)\u030c\b)\n)\f)\u030f\t)\u0001)\u0001)\u0004)\u0313\b)\u000b)\f"+ + ")\u0314\u0001)\u0004)\u0318\b)\u000b)\f)\u0319\u0001)\u0001)\u0005)\u031e"+ + "\b)\n)\f)\u0321\t)\u0003)\u0323\b)\u0001)\u0001)\u0001)\u0001)\u0004)"+ + "\u0329\b)\u000b)\f)\u032a\u0001)\u0001)\u0003)\u032f\b)\u0001*\u0001*"+ + "\u0001*\u0001+\u0001+\u0001+\u0001+\u0001,\u0001,\u0001,\u0001,\u0001"+ + "-\u0001-\u0001.\u0001.\u0001.\u0001/\u0001/\u00010\u00010\u00011\u0001"+ + "1\u00011\u00011\u00011\u00012\u00012\u00013\u00013\u00013\u00013\u0001"+ + "3\u00013\u00014\u00014\u00014\u00014\u00014\u00014\u00015\u00015\u0001"+ + "5\u00016\u00016\u00016\u00017\u00017\u00017\u00017\u00017\u00018\u0001"+ + "8\u00018\u00018\u00018\u00019\u00019\u0001:\u0001:\u0001:\u0001:\u0001"+ + ";\u0001;\u0001;\u0001;\u0001;\u0001<\u0001<\u0001<\u0001<\u0001<\u0001"+ + "<\u0001=\u0001=\u0001=\u0001>\u0001>\u0001?\u0001?\u0001?\u0001?\u0001"+ + "?\u0001?\u0001@\u0001@\u0001A\u0001A\u0001A\u0001A\u0001A\u0001B\u0001"+ + "B\u0001B\u0001C\u0001C\u0001C\u0001D\u0001D\u0001D\u0001E\u0001E\u0001"+ + "F\u0001F\u0001F\u0001G\u0001G\u0001H\u0001H\u0001H\u0001I\u0001I\u0001"+ + "J\u0001J\u0001K\u0001K\u0001L\u0001L\u0001M\u0001M\u0001N\u0001N\u0001"+ + "N\u0001N\u0001O\u0001O\u0001O\u0003O\u03af\bO\u0001O\u0005O\u03b2\bO\n"+ + "O\fO\u03b5\tO\u0001O\u0001O\u0004O\u03b9\bO\u000bO\fO\u03ba\u0003O\u03bd"+ + "\bO\u0001P\u0001P\u0001P\u0001P\u0001P\u0001Q\u0001Q\u0001Q\u0001Q\u0001"+ + "Q\u0001R\u0001R\u0005R\u03cb\bR\nR\fR\u03ce\tR\u0001R\u0001R\u0003R\u03d2"+ + "\bR\u0001R\u0004R\u03d5\bR\u000bR\fR\u03d6\u0003R\u03d9\bR\u0001S\u0001"+ + "S\u0004S\u03dd\bS\u000bS\fS\u03de\u0001S\u0001S\u0001T\u0001T\u0001U\u0001"+ + "U\u0001U\u0001U\u0001V\u0001V\u0001V\u0001V\u0001W\u0001W\u0001W\u0001"+ + "W\u0001X\u0001X\u0001X\u0001X\u0001X\u0001Y\u0001Y\u0001Y\u0001Y\u0001"+ + "Y\u0001Z\u0001Z\u0001Z\u0001Z\u0001[\u0001[\u0001[\u0001[\u0001\\\u0001"+ + "\\\u0001\\\u0001\\\u0001]\u0001]\u0001]\u0001]\u0001]\u0001^\u0001^\u0001"+ + "^\u0001^\u0001_\u0001_\u0001_\u0001_\u0001`\u0001`\u0001`\u0001`\u0001"+ + "a\u0001a\u0001a\u0001a\u0001b\u0001b\u0001b\u0001b\u0001c\u0001c\u0001"+ + "c\u0001c\u0001c\u0001c\u0001c\u0001c\u0001c\u0001d\u0001d\u0001d\u0003"+ + "d\u042c\bd\u0001e\u0004e\u042f\be\u000be\fe\u0430\u0001f\u0001f\u0001"+ + "f\u0001f\u0001g\u0001g\u0001g\u0001g\u0001h\u0001h\u0001h\u0001h\u0001"+ + "i\u0001i\u0001i\u0001i\u0001j\u0001j\u0001j\u0001j\u0001k\u0001k\u0001"+ + "k\u0001k\u0001k\u0001l\u0001l\u0001l\u0001l\u0001m\u0001m\u0001m\u0001"+ + "m\u0001n\u0001n\u0001n\u0001n\u0001n\u0001o\u0001o\u0001o\u0001o\u0001"+ + "o\u0001p\u0001p\u0001p\u0001p\u0003p\u0462\bp\u0001q\u0001q\u0003q\u0466"+ + "\bq\u0001q\u0005q\u0469\bq\nq\fq\u046c\tq\u0001q\u0001q\u0003q\u0470\b"+ + "q\u0001q\u0004q\u0473\bq\u000bq\fq\u0474\u0003q\u0477\bq\u0001r\u0001"+ + "r\u0004r\u047b\br\u000br\fr\u047c\u0001s\u0001s\u0001s\u0001s\u0001t\u0001"+ + "t\u0001t\u0001t\u0001u\u0001u\u0001u\u0001u\u0001v\u0001v\u0001v\u0001"+ + "v\u0001v\u0001w\u0001w\u0001w\u0001w\u0001x\u0001x\u0001x\u0001x\u0001"+ + "y\u0001y\u0001y\u0001y\u0001z\u0001z\u0001z\u0001z\u0001z\u0001{\u0001"+ + 
"{\u0001{\u0001{\u0001{\u0001|\u0001|\u0001|\u0001}\u0001}\u0001}\u0001"+ + "}\u0001~\u0001~\u0001~\u0001~\u0001\u007f\u0001\u007f\u0001\u007f\u0001"+ + "\u007f\u0001\u0080\u0001\u0080\u0001\u0080\u0001\u0080\u0001\u0081\u0001"+ + "\u0081\u0001\u0081\u0001\u0081\u0001\u0081\u0001\u0082\u0001\u0082\u0001"+ + "\u0082\u0001\u0082\u0001\u0082\u0001\u0083\u0001\u0083\u0001\u0083\u0001"+ + "\u0083\u0001\u0083\u0001\u0084\u0001\u0084\u0001\u0084\u0001\u0084\u0001"+ + "\u0084\u0001\u0084\u0001\u0084\u0001\u0085\u0001\u0085\u0001\u0086\u0004"+ + "\u0086\u04d2\b\u0086\u000b\u0086\f\u0086\u04d3\u0001\u0086\u0001\u0086"+ + "\u0003\u0086\u04d8\b\u0086\u0001\u0086\u0004\u0086\u04db\b\u0086\u000b"+ + "\u0086\f\u0086\u04dc\u0001\u0087\u0001\u0087\u0001\u0087\u0001\u0087\u0001"+ + "\u0088\u0001\u0088\u0001\u0088\u0001\u0088\u0001\u0089\u0001\u0089\u0001"+ + "\u0089\u0001\u0089\u0001\u008a\u0001\u008a\u0001\u008a\u0001\u008a\u0001"+ + "\u008b\u0001\u008b\u0001\u008b\u0001\u008b\u0001\u008b\u0001\u008b\u0001"+ + "\u008c\u0001\u008c\u0001\u008c\u0001\u008c\u0001\u008d\u0001\u008d\u0001"+ + "\u008d\u0001\u008d\u0001\u008e\u0001\u008e\u0001\u008e\u0001\u008e\u0001"+ + "\u008f\u0001\u008f\u0001\u008f\u0001\u008f\u0001\u0090\u0001\u0090\u0001"+ + "\u0090\u0001\u0090\u0001\u0091\u0001\u0091\u0001\u0091\u0001\u0091\u0001"+ + "\u0092\u0001\u0092\u0001\u0092\u0001\u0092\u0001\u0092\u0001\u0093\u0001"+ + "\u0093\u0001\u0093\u0001\u0093\u0001\u0093\u0001\u0094\u0001\u0094\u0001"+ + "\u0094\u0001\u0094\u0001\u0095\u0001\u0095\u0001\u0095\u0001\u0095\u0001"+ + "\u0096\u0001\u0096\u0001\u0096\u0001\u0096\u0001\u0097\u0001\u0097\u0001"+ + "\u0097\u0001\u0097\u0001\u0097\u0001\u0098\u0001\u0098\u0001\u0098\u0001"+ + "\u0098\u0001\u0099\u0001\u0099\u0001\u0099\u0001\u0099\u0001\u0099\u0001"+ + "\u009a\u0001\u009a\u0001\u009a\u0001\u009a\u0001\u009a\u0001\u009b\u0001"+ + "\u009b\u0001\u009b\u0001\u009b\u0001\u009c\u0001\u009c\u0001\u009c\u0001"+ + "\u009c\u0001\u009d\u0001\u009d\u0001\u009d\u0001\u009d\u0001\u009e\u0001"+ + "\u009e\u0001\u009e\u0001\u009e\u0001\u009f\u0001\u009f\u0001\u009f\u0001"+ + "\u009f\u0001\u00a0\u0001\u00a0\u0001\u00a0\u0001\u00a0\u0001\u00a0\u0001"+ + "\u00a1\u0001\u00a1\u0001\u00a1\u0001\u00a1\u0001\u00a1\u0001\u00a2\u0001"+ + "\u00a2\u0001\u00a2\u0001\u00a2\u0001\u00a3\u0001\u00a3\u0001\u00a3\u0001"+ + "\u00a3\u0001\u00a4\u0001\u00a4\u0001\u00a4\u0001\u00a4\u0001\u00a5\u0001"+ "\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a5\u0001\u00a6\u0001\u00a6\u0001"+ "\u00a6\u0001\u00a6\u0001\u00a7\u0001\u00a7\u0001\u00a7\u0001\u00a7\u0001"+ - "\u00a7\u0001\u00a8\u0001\u00a8\u0001\u00a8\u0001\u00a8\u0001\u00a9\u0001"+ - "\u00a9\u0001\u00a9\u0001\u00a9\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001"+ - "\u00aa\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001"+ - "\u00ac\u0001\u00ac\u0001\u00ac\u0001\u00ac\u0001\u00ad\u0001\u00ad\u0001"+ - "\u00ad\u0001\u00ad\u0001\u00ae\u0001\u00ae\u0001\u00ae\u0001\u00ae\u0001"+ - "\u00af\u0001\u00af\u0001\u00af\u0001\u00af\u0001\u00b0\u0001\u00b0\u0001"+ - "\u00b0\u0001\u00b0\u0001\u00b1\u0001\u00b1\u0001\u00b1\u0001\u00b1\u0001"+ - "\u00b1\u0001\u00b1\u0001\u00b2\u0001\u00b2\u0001\u00b2\u0001\u00b2\u0001"+ - "\u00b3\u0001\u00b3\u0001\u00b3\u0001\u00b3\u0001\u00b4\u0001\u00b4\u0001"+ - "\u00b4\u0001\u00b4\u0001\u00b5\u0001\u00b5\u0001\u00b5\u0001\u00b5\u0001"+ - "\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b7\u0001\u00b7\u0001"+ - "\u00b7\u0001\u00b7\u0001\u00b8\u0001\u00b8\u0001\u00b8\u0001\u00b8\u0001"+ - 
"\u00b8\u0001\u00b9\u0001\u00b9\u0001\u00b9\u0001\u00b9\u0001\u00b9\u0001"+ - "\u00b9\u0001\u00ba\u0001\u00ba\u0001\u00ba\u0001\u00ba\u0001\u00ba\u0001"+ - "\u00ba\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bc\u0001"+ - "\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bd\u0001\u00bd\u0001\u00bd\u0001"+ - "\u00bd\u0001\u00be\u0001\u00be\u0001\u00be\u0001\u00be\u0001\u00be\u0001"+ - "\u00be\u0001\u00bf\u0001\u00bf\u0001\u00bf\u0001\u00bf\u0001\u00bf\u0001"+ - "\u00bf\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c1\u0001"+ - "\u00c1\u0001\u00c1\u0001\u00c1\u0001\u00c2\u0001\u00c2\u0001\u00c2\u0001"+ - "\u00c2\u0001\u00c3\u0001\u00c3\u0001\u00c3\u0001\u00c3\u0001\u00c3\u0001"+ - "\u00c3\u0001\u00c4\u0001\u00c4\u0001\u00c4\u0001\u00c4\u0001\u00c4\u0001"+ - "\u00c4\u0001\u00c5\u0001\u00c5\u0001\u00c5\u0001\u00c5\u0001\u00c5\u0001"+ - "\u00c5\u0001\u00c6\u0001\u00c6\u0001\u00c6\u0001\u00c6\u0001\u00c6\u0002"+ - "\u0261\u02a8\u0000\u00c7\u000f\u0001\u0011\u0002\u0013\u0003\u0015\u0004"+ - "\u0017\u0005\u0019\u0006\u001b\u0007\u001d\b\u001f\t!\n#\u000b%\f\'\r"+ - ")\u000e+\u000f-\u0010/\u00111\u00123\u00135\u00147\u00159\u0016;\u0017"+ - "=\u0018?\u0019A\u0000C\u0000E\u0000G\u0000I\u0000K\u0000M\u0000O\u0000"+ - "Q\u0000S\u0000U\u001aW\u001bY\u001c[\u001d]\u001e_\u001fa c!e\"g#i$k%"+ - "m&o\'q(s)u*w+y,{-}.\u007f/\u00810\u00831\u00852\u00873\u00894\u008b5\u008d"+ - "6\u008f7\u00918\u00939\u0095:\u0097;\u0099<\u009b=\u009d>\u009f?\u00a1"+ - "\u0000\u00a3\u0000\u00a5@\u00a7A\u00a9B\u00abC\u00ad\u0000\u00afD\u00b1"+ - "E\u00b3F\u00b5G\u00b7\u0000\u00b9\u0000\u00bbH\u00bdI\u00bfJ\u00c1\u0000"+ - "\u00c3\u0000\u00c5\u0000\u00c7\u0000\u00c9\u0000\u00cb\u0000\u00cdK\u00cf"+ - "\u0000\u00d1L\u00d3\u0000\u00d5\u0000\u00d7M\u00d9N\u00dbO\u00dd\u0000"+ - "\u00df\u0000\u00e1\u0000\u00e3\u0000\u00e5\u0000\u00e7\u0000\u00e9\u0000"+ - "\u00ebP\u00edQ\u00efR\u00f1S\u00f3\u0000\u00f5\u0000\u00f7\u0000\u00f9"+ - "\u0000\u00fb\u0000\u00fd\u0000\u00ffT\u0101\u0000\u0103U\u0105V\u0107"+ - "W\u0109\u0000\u010b\u0000\u010dX\u010fY\u0111\u0000\u0113Z\u0115\u0000"+ - "\u0117[\u0119\\\u011b]\u011d\u0000\u011f\u0000\u0121\u0000\u0123\u0000"+ - "\u0125\u0000\u0127\u0000\u0129\u0000\u012b\u0000\u012d\u0000\u012f^\u0131"+ - "_\u0133`\u0135\u0000\u0137\u0000\u0139\u0000\u013b\u0000\u013d\u0000\u013f"+ - "\u0000\u0141a\u0143b\u0145c\u0147\u0000\u0149d\u014be\u014df\u014fg\u0151"+ - "\u0000\u0153\u0000\u0155h\u0157i\u0159j\u015bk\u015d\u0000\u015f\u0000"+ - "\u0161\u0000\u0163\u0000\u0165\u0000\u0167\u0000\u0169\u0000\u016bl\u016d"+ - "m\u016fn\u0171\u0000\u0173\u0000\u0175\u0000\u0177\u0000\u0179o\u017b"+ - "p\u017dq\u017f\u0000\u0181\u0000\u0183\u0000\u0185r\u0187s\u0189t\u018b"+ - "\u0000\u018d\u0000\u018fu\u0191v\u0193w\u0195\u0000\u0197\u0000\u0199"+ - "\u0000\u019b\u0000\u000f\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007"+ - "\b\t\n\u000b\f\r\u000e#\u0002\u0000DDdd\u0002\u0000IIii\u0002\u0000SS"+ - "ss\u0002\u0000EEee\u0002\u0000CCcc\u0002\u0000TTtt\u0002\u0000RRrr\u0002"+ - "\u0000OOoo\u0002\u0000PPpp\u0002\u0000NNnn\u0002\u0000HHhh\u0002\u0000"+ - "VVvv\u0002\u0000AAaa\u0002\u0000LLll\u0002\u0000XXxx\u0002\u0000FFff\u0002"+ - "\u0000MMmm\u0002\u0000GGgg\u0002\u0000KKkk\u0002\u0000WWww\u0002\u0000"+ - "UUuu\u0006\u0000\t\n\r\r //[[]]\u0002\u0000\n\n\r\r\u0003\u0000\t\n\r"+ - "\r \u0001\u000009\u0002\u0000AZaz\b\u0000\"\"NNRRTT\\\\nnrrtt\u0004\u0000"+ - "\n\n\r\r\"\"\\\\\u0002\u0000++--\u0001\u0000``\u0002\u0000BBbb\u0002\u0000"+ - "YYyy\u000b\u0000\t\n\r\r \"\",,//::==[[]]||\u0002\u0000**//\u000b\u0000"+ 
- "\t\n\r\r \"#,,//::<<>?\\\\||\u05e8\u0000\u000f\u0001\u0000\u0000\u0000"+ - "\u0000\u0011\u0001\u0000\u0000\u0000\u0000\u0013\u0001\u0000\u0000\u0000"+ - "\u0000\u0015\u0001\u0000\u0000\u0000\u0000\u0017\u0001\u0000\u0000\u0000"+ - "\u0000\u0019\u0001\u0000\u0000\u0000\u0000\u001b\u0001\u0000\u0000\u0000"+ - "\u0000\u001d\u0001\u0000\u0000\u0000\u0000\u001f\u0001\u0000\u0000\u0000"+ - "\u0000!\u0001\u0000\u0000\u0000\u0000#\u0001\u0000\u0000\u0000\u0000%"+ - "\u0001\u0000\u0000\u0000\u0000\'\u0001\u0000\u0000\u0000\u0000)\u0001"+ - "\u0000\u0000\u0000\u0000+\u0001\u0000\u0000\u0000\u0000-\u0001\u0000\u0000"+ - "\u0000\u0000/\u0001\u0000\u0000\u0000\u00001\u0001\u0000\u0000\u0000\u0000"+ - "3\u0001\u0000\u0000\u0000\u00005\u0001\u0000\u0000\u0000\u00007\u0001"+ - "\u0000\u0000\u0000\u00009\u0001\u0000\u0000\u0000\u0000;\u0001\u0000\u0000"+ - "\u0000\u0000=\u0001\u0000\u0000\u0000\u0001?\u0001\u0000\u0000\u0000\u0001"+ - "U\u0001\u0000\u0000\u0000\u0001W\u0001\u0000\u0000\u0000\u0001Y\u0001"+ - "\u0000\u0000\u0000\u0001[\u0001\u0000\u0000\u0000\u0001]\u0001\u0000\u0000"+ - "\u0000\u0001_\u0001\u0000\u0000\u0000\u0001a\u0001\u0000\u0000\u0000\u0001"+ - "c\u0001\u0000\u0000\u0000\u0001e\u0001\u0000\u0000\u0000\u0001g\u0001"+ - "\u0000\u0000\u0000\u0001i\u0001\u0000\u0000\u0000\u0001k\u0001\u0000\u0000"+ - "\u0000\u0001m\u0001\u0000\u0000\u0000\u0001o\u0001\u0000\u0000\u0000\u0001"+ - "q\u0001\u0000\u0000\u0000\u0001s\u0001\u0000\u0000\u0000\u0001u\u0001"+ - "\u0000\u0000\u0000\u0001w\u0001\u0000\u0000\u0000\u0001y\u0001\u0000\u0000"+ - "\u0000\u0001{\u0001\u0000\u0000\u0000\u0001}\u0001\u0000\u0000\u0000\u0001"+ - "\u007f\u0001\u0000\u0000\u0000\u0001\u0081\u0001\u0000\u0000\u0000\u0001"+ - "\u0083\u0001\u0000\u0000\u0000\u0001\u0085\u0001\u0000\u0000\u0000\u0001"+ - "\u0087\u0001\u0000\u0000\u0000\u0001\u0089\u0001\u0000\u0000\u0000\u0001"+ - "\u008b\u0001\u0000\u0000\u0000\u0001\u008d\u0001\u0000\u0000\u0000\u0001"+ - "\u008f\u0001\u0000\u0000\u0000\u0001\u0091\u0001\u0000\u0000\u0000\u0001"+ - "\u0093\u0001\u0000\u0000\u0000\u0001\u0095\u0001\u0000\u0000\u0000\u0001"+ - "\u0097\u0001\u0000\u0000\u0000\u0001\u0099\u0001\u0000\u0000\u0000\u0001"+ - "\u009b\u0001\u0000\u0000\u0000\u0001\u009d\u0001\u0000\u0000\u0000\u0001"+ - "\u009f\u0001\u0000\u0000\u0000\u0001\u00a1\u0001\u0000\u0000\u0000\u0001"+ - "\u00a3\u0001\u0000\u0000\u0000\u0001\u00a5\u0001\u0000\u0000\u0000\u0001"+ - "\u00a7\u0001\u0000\u0000\u0000\u0001\u00a9\u0001\u0000\u0000\u0000\u0001"+ - "\u00ab\u0001\u0000\u0000\u0000\u0001\u00af\u0001\u0000\u0000\u0000\u0001"+ - "\u00b1\u0001\u0000\u0000\u0000\u0001\u00b3\u0001\u0000\u0000\u0000\u0001"+ - "\u00b5\u0001\u0000\u0000\u0000\u0002\u00b7\u0001\u0000\u0000\u0000\u0002"+ - "\u00b9\u0001\u0000\u0000\u0000\u0002\u00bb\u0001\u0000\u0000\u0000\u0002"+ - "\u00bd\u0001\u0000\u0000\u0000\u0002\u00bf\u0001\u0000\u0000\u0000\u0003"+ - "\u00c1\u0001\u0000\u0000\u0000\u0003\u00c3\u0001\u0000\u0000\u0000\u0003"+ - "\u00c5\u0001\u0000\u0000\u0000\u0003\u00c7\u0001\u0000\u0000\u0000\u0003"+ - "\u00c9\u0001\u0000\u0000\u0000\u0003\u00cb\u0001\u0000\u0000\u0000\u0003"+ - "\u00cd\u0001\u0000\u0000\u0000\u0003\u00d1\u0001\u0000\u0000\u0000\u0003"+ - "\u00d3\u0001\u0000\u0000\u0000\u0003\u00d5\u0001\u0000\u0000\u0000\u0003"+ - "\u00d7\u0001\u0000\u0000\u0000\u0003\u00d9\u0001\u0000\u0000\u0000\u0003"+ - "\u00db\u0001\u0000\u0000\u0000\u0004\u00dd\u0001\u0000\u0000\u0000\u0004"+ - "\u00df\u0001\u0000\u0000\u0000\u0004\u00e1\u0001\u0000\u0000\u0000\u0004"+ - 
"\u00e3\u0001\u0000\u0000\u0000\u0004\u00e5\u0001\u0000\u0000\u0000\u0004"+ - "\u00eb\u0001\u0000\u0000\u0000\u0004\u00ed\u0001\u0000\u0000\u0000\u0004"+ - "\u00ef\u0001\u0000\u0000\u0000\u0004\u00f1\u0001\u0000\u0000\u0000\u0005"+ - "\u00f3\u0001\u0000\u0000\u0000\u0005\u00f5\u0001\u0000\u0000\u0000\u0005"+ - "\u00f7\u0001\u0000\u0000\u0000\u0005\u00f9\u0001\u0000\u0000\u0000\u0005"+ - "\u00fb\u0001\u0000\u0000\u0000\u0005\u00fd\u0001\u0000\u0000\u0000\u0005"+ - "\u00ff\u0001\u0000\u0000\u0000\u0005\u0101\u0001\u0000\u0000\u0000\u0005"+ - "\u0103\u0001\u0000\u0000\u0000\u0005\u0105\u0001\u0000\u0000\u0000\u0005"+ - "\u0107\u0001\u0000\u0000\u0000\u0006\u0109\u0001\u0000\u0000\u0000\u0006"+ - "\u010b\u0001\u0000\u0000\u0000\u0006\u010d\u0001\u0000\u0000\u0000\u0006"+ - "\u010f\u0001\u0000\u0000\u0000\u0006\u0113\u0001\u0000\u0000\u0000\u0006"+ - "\u0115\u0001\u0000\u0000\u0000\u0006\u0117\u0001\u0000\u0000\u0000\u0006"+ - "\u0119\u0001\u0000\u0000\u0000\u0006\u011b\u0001\u0000\u0000\u0000\u0007"+ - "\u011d\u0001\u0000\u0000\u0000\u0007\u011f\u0001\u0000\u0000\u0000\u0007"+ - "\u0121\u0001\u0000\u0000\u0000\u0007\u0123\u0001\u0000\u0000\u0000\u0007"+ - "\u0125\u0001\u0000\u0000\u0000\u0007\u0127\u0001\u0000\u0000\u0000\u0007"+ - "\u0129\u0001\u0000\u0000\u0000\u0007\u012b\u0001\u0000\u0000\u0000\u0007"+ - "\u012d\u0001\u0000\u0000\u0000\u0007\u012f\u0001\u0000\u0000\u0000\u0007"+ - "\u0131\u0001\u0000\u0000\u0000\u0007\u0133\u0001\u0000\u0000\u0000\b\u0135"+ - "\u0001\u0000\u0000\u0000\b\u0137\u0001\u0000\u0000\u0000\b\u0139\u0001"+ - "\u0000\u0000\u0000\b\u013b\u0001\u0000\u0000\u0000\b\u013d\u0001\u0000"+ - "\u0000\u0000\b\u013f\u0001\u0000\u0000\u0000\b\u0141\u0001\u0000\u0000"+ - "\u0000\b\u0143\u0001\u0000\u0000\u0000\b\u0145\u0001\u0000\u0000\u0000"+ - "\t\u0147\u0001\u0000\u0000\u0000\t\u0149\u0001\u0000\u0000\u0000\t\u014b"+ - "\u0001\u0000\u0000\u0000\t\u014d\u0001\u0000\u0000\u0000\t\u014f\u0001"+ - "\u0000\u0000\u0000\n\u0151\u0001\u0000\u0000\u0000\n\u0153\u0001\u0000"+ - "\u0000\u0000\n\u0155\u0001\u0000\u0000\u0000\n\u0157\u0001\u0000\u0000"+ - "\u0000\n\u0159\u0001\u0000\u0000\u0000\n\u015b\u0001\u0000\u0000\u0000"+ - "\u000b\u015d\u0001\u0000\u0000\u0000\u000b\u015f\u0001\u0000\u0000\u0000"+ - "\u000b\u0161\u0001\u0000\u0000\u0000\u000b\u0163\u0001\u0000\u0000\u0000"+ - "\u000b\u0165\u0001\u0000\u0000\u0000\u000b\u0167\u0001\u0000\u0000\u0000"+ - "\u000b\u0169\u0001\u0000\u0000\u0000\u000b\u016b\u0001\u0000\u0000\u0000"+ - "\u000b\u016d\u0001\u0000\u0000\u0000\u000b\u016f\u0001\u0000\u0000\u0000"+ - "\f\u0171\u0001\u0000\u0000\u0000\f\u0173\u0001\u0000\u0000\u0000\f\u0175"+ - "\u0001\u0000\u0000\u0000\f\u0177\u0001\u0000\u0000\u0000\f\u0179\u0001"+ - "\u0000\u0000\u0000\f\u017b\u0001\u0000\u0000\u0000\f\u017d\u0001\u0000"+ - "\u0000\u0000\r\u017f\u0001\u0000\u0000\u0000\r\u0181\u0001\u0000\u0000"+ - "\u0000\r\u0183\u0001\u0000\u0000\u0000\r\u0185\u0001\u0000\u0000\u0000"+ - "\r\u0187\u0001\u0000\u0000\u0000\r\u0189\u0001\u0000\u0000\u0000\u000e"+ - "\u018b\u0001\u0000\u0000\u0000\u000e\u018d\u0001\u0000\u0000\u0000\u000e"+ - "\u018f\u0001\u0000\u0000\u0000\u000e\u0191\u0001\u0000\u0000\u0000\u000e"+ - "\u0193\u0001\u0000\u0000\u0000\u000e\u0195\u0001\u0000\u0000\u0000\u000e"+ - "\u0197\u0001\u0000\u0000\u0000\u000e\u0199\u0001\u0000\u0000\u0000\u000e"+ - "\u019b\u0001\u0000\u0000\u0000\u000f\u019d\u0001\u0000\u0000\u0000\u0011"+ - "\u01a7\u0001\u0000\u0000\u0000\u0013\u01ae\u0001\u0000\u0000\u0000\u0015"+ - 
"\u01b7\u0001\u0000\u0000\u0000\u0017\u01be\u0001\u0000\u0000\u0000\u0019"+ - "\u01c8\u0001\u0000\u0000\u0000\u001b\u01cf\u0001\u0000\u0000\u0000\u001d"+ - "\u01d6\u0001\u0000\u0000\u0000\u001f\u01dd\u0001\u0000\u0000\u0000!\u01e5"+ - "\u0001\u0000\u0000\u0000#\u01f1\u0001\u0000\u0000\u0000%\u01fa\u0001\u0000"+ - "\u0000\u0000\'\u0200\u0001\u0000\u0000\u0000)\u0207\u0001\u0000\u0000"+ - "\u0000+\u020e\u0001\u0000\u0000\u0000-\u0216\u0001\u0000\u0000\u0000/"+ - "\u021e\u0001\u0000\u0000\u00001\u022d\u0001\u0000\u0000\u00003\u0237\u0001"+ - "\u0000\u0000\u00005\u0243\u0001\u0000\u0000\u00007\u0249\u0001\u0000\u0000"+ - "\u00009\u025a\u0001\u0000\u0000\u0000;\u026a\u0001\u0000\u0000\u0000="+ - "\u0270\u0001\u0000\u0000\u0000?\u0272\u0001\u0000\u0000\u0000A\u0276\u0001"+ - "\u0000\u0000\u0000C\u0278\u0001\u0000\u0000\u0000E\u027a\u0001\u0000\u0000"+ - "\u0000G\u027d\u0001\u0000\u0000\u0000I\u027f\u0001\u0000\u0000\u0000K"+ - "\u0288\u0001\u0000\u0000\u0000M\u028a\u0001\u0000\u0000\u0000O\u028f\u0001"+ - "\u0000\u0000\u0000Q\u0291\u0001\u0000\u0000\u0000S\u0296\u0001\u0000\u0000"+ - "\u0000U\u02b5\u0001\u0000\u0000\u0000W\u02b8\u0001\u0000\u0000\u0000Y"+ - "\u02e6\u0001\u0000\u0000\u0000[\u02e8\u0001\u0000\u0000\u0000]\u02eb\u0001"+ - "\u0000\u0000\u0000_\u02ef\u0001\u0000\u0000\u0000a\u02f3\u0001\u0000\u0000"+ - "\u0000c\u02f5\u0001\u0000\u0000\u0000e\u02f8\u0001\u0000\u0000\u0000g"+ - "\u02fa\u0001\u0000\u0000\u0000i\u02ff\u0001\u0000\u0000\u0000k\u0301\u0001"+ - "\u0000\u0000\u0000m\u0307\u0001\u0000\u0000\u0000o\u030d\u0001\u0000\u0000"+ - "\u0000q\u0310\u0001\u0000\u0000\u0000s\u0313\u0001\u0000\u0000\u0000u"+ - "\u0318\u0001\u0000\u0000\u0000w\u031d\u0001\u0000\u0000\u0000y\u031f\u0001"+ - "\u0000\u0000\u0000{\u0323\u0001\u0000\u0000\u0000}\u0328\u0001\u0000\u0000"+ - "\u0000\u007f\u032e\u0001\u0000\u0000\u0000\u0081\u0331\u0001\u0000\u0000"+ - "\u0000\u0083\u0333\u0001\u0000\u0000\u0000\u0085\u0339\u0001\u0000\u0000"+ - "\u0000\u0087\u033b\u0001\u0000\u0000\u0000\u0089\u0340\u0001\u0000\u0000"+ - "\u0000\u008b\u0343\u0001\u0000\u0000\u0000\u008d\u0346\u0001\u0000\u0000"+ - "\u0000\u008f\u0349\u0001\u0000\u0000\u0000\u0091\u034b\u0001\u0000\u0000"+ - "\u0000\u0093\u034e\u0001\u0000\u0000\u0000\u0095\u0350\u0001\u0000\u0000"+ - "\u0000\u0097\u0353\u0001\u0000\u0000\u0000\u0099\u0355\u0001\u0000\u0000"+ - "\u0000\u009b\u0357\u0001\u0000\u0000\u0000\u009d\u0359\u0001\u0000\u0000"+ - "\u0000\u009f\u035b\u0001\u0000\u0000\u0000\u00a1\u035d\u0001\u0000\u0000"+ - "\u0000\u00a3\u0362\u0001\u0000\u0000\u0000\u00a5\u0377\u0001\u0000\u0000"+ - "\u0000\u00a7\u0379\u0001\u0000\u0000\u0000\u00a9\u037e\u0001\u0000\u0000"+ - "\u0000\u00ab\u0393\u0001\u0000\u0000\u0000\u00ad\u0395\u0001\u0000\u0000"+ - "\u0000\u00af\u039d\u0001\u0000\u0000\u0000\u00b1\u039f\u0001\u0000\u0000"+ - "\u0000\u00b3\u03a3\u0001\u0000\u0000\u0000\u00b5\u03a7\u0001\u0000\u0000"+ - "\u0000\u00b7\u03ab\u0001\u0000\u0000\u0000\u00b9\u03b0\u0001\u0000\u0000"+ - "\u0000\u00bb\u03b5\u0001\u0000\u0000\u0000\u00bd\u03b9\u0001\u0000\u0000"+ - "\u0000\u00bf\u03bd\u0001\u0000\u0000\u0000\u00c1\u03c1\u0001\u0000\u0000"+ - "\u0000\u00c3\u03c6\u0001\u0000\u0000\u0000\u00c5\u03ca\u0001\u0000\u0000"+ - "\u0000\u00c7\u03ce\u0001\u0000\u0000\u0000\u00c9\u03d2\u0001\u0000\u0000"+ - "\u0000\u00cb\u03d6\u0001\u0000\u0000\u0000\u00cd\u03da\u0001\u0000\u0000"+ - "\u0000\u00cf\u03e6\u0001\u0000\u0000\u0000\u00d1\u03e9\u0001\u0000\u0000"+ - "\u0000\u00d3\u03ed\u0001\u0000\u0000\u0000\u00d5\u03f1\u0001\u0000\u0000"+ - 
"\u0000\u00d7\u03f5\u0001\u0000\u0000\u0000\u00d9\u03f9\u0001\u0000\u0000"+ - "\u0000\u00db\u03fd\u0001\u0000\u0000\u0000\u00dd\u0401\u0001\u0000\u0000"+ - "\u0000\u00df\u0406\u0001\u0000\u0000\u0000\u00e1\u040a\u0001\u0000\u0000"+ - "\u0000\u00e3\u040e\u0001\u0000\u0000\u0000\u00e5\u0413\u0001\u0000\u0000"+ - "\u0000\u00e7\u041c\u0001\u0000\u0000\u0000\u00e9\u0431\u0001\u0000\u0000"+ - "\u0000\u00eb\u0435\u0001\u0000\u0000\u0000\u00ed\u0439\u0001\u0000\u0000"+ - "\u0000\u00ef\u043d\u0001\u0000\u0000\u0000\u00f1\u0441\u0001\u0000\u0000"+ - "\u0000\u00f3\u0445\u0001\u0000\u0000\u0000\u00f5\u044a\u0001\u0000\u0000"+ - "\u0000\u00f7\u044e\u0001\u0000\u0000\u0000\u00f9\u0452\u0001\u0000\u0000"+ - "\u0000\u00fb\u0456\u0001\u0000\u0000\u0000\u00fd\u045b\u0001\u0000\u0000"+ - "\u0000\u00ff\u0460\u0001\u0000\u0000\u0000\u0101\u0463\u0001\u0000\u0000"+ - "\u0000\u0103\u0467\u0001\u0000\u0000\u0000\u0105\u046b\u0001\u0000\u0000"+ - "\u0000\u0107\u046f\u0001\u0000\u0000\u0000\u0109\u0473\u0001\u0000\u0000"+ - "\u0000\u010b\u0478\u0001\u0000\u0000\u0000\u010d\u047d\u0001\u0000\u0000"+ - "\u0000\u010f\u0482\u0001\u0000\u0000\u0000\u0111\u0489\u0001\u0000\u0000"+ - "\u0000\u0113\u0492\u0001\u0000\u0000\u0000\u0115\u0499\u0001\u0000\u0000"+ - "\u0000\u0117\u049d\u0001\u0000\u0000\u0000\u0119\u04a1\u0001\u0000\u0000"+ - "\u0000\u011b\u04a5\u0001\u0000\u0000\u0000\u011d\u04a9\u0001\u0000\u0000"+ - "\u0000\u011f\u04af\u0001\u0000\u0000\u0000\u0121\u04b3\u0001\u0000\u0000"+ - "\u0000\u0123\u04b7\u0001\u0000\u0000\u0000\u0125\u04bb\u0001\u0000\u0000"+ - "\u0000\u0127\u04bf\u0001\u0000\u0000\u0000\u0129\u04c3\u0001\u0000\u0000"+ - "\u0000\u012b\u04c7\u0001\u0000\u0000\u0000\u012d\u04cc\u0001\u0000\u0000"+ - "\u0000\u012f\u04d1\u0001\u0000\u0000\u0000\u0131\u04d5\u0001\u0000\u0000"+ - "\u0000\u0133\u04d9\u0001\u0000\u0000\u0000\u0135\u04dd\u0001\u0000\u0000"+ - "\u0000\u0137\u04e2\u0001\u0000\u0000\u0000\u0139\u04e6\u0001\u0000\u0000"+ - "\u0000\u013b\u04eb\u0001\u0000\u0000\u0000\u013d\u04f0\u0001\u0000\u0000"+ - "\u0000\u013f\u04f4\u0001\u0000\u0000\u0000\u0141\u04f8\u0001\u0000\u0000"+ - "\u0000\u0143\u04fc\u0001\u0000\u0000\u0000\u0145\u0500\u0001\u0000\u0000"+ - "\u0000\u0147\u0504\u0001\u0000\u0000\u0000\u0149\u0509\u0001\u0000\u0000"+ - "\u0000\u014b\u050e\u0001\u0000\u0000\u0000\u014d\u0512\u0001\u0000\u0000"+ - "\u0000\u014f\u0516\u0001\u0000\u0000\u0000\u0151\u051a\u0001\u0000\u0000"+ - "\u0000\u0153\u051f\u0001\u0000\u0000\u0000\u0155\u0528\u0001\u0000\u0000"+ - "\u0000\u0157\u052c\u0001\u0000\u0000\u0000\u0159\u0530\u0001\u0000\u0000"+ - "\u0000\u015b\u0534\u0001\u0000\u0000\u0000\u015d\u0538\u0001\u0000\u0000"+ - "\u0000\u015f\u053d\u0001\u0000\u0000\u0000\u0161\u0541\u0001\u0000\u0000"+ - "\u0000\u0163\u0545\u0001\u0000\u0000\u0000\u0165\u0549\u0001\u0000\u0000"+ - "\u0000\u0167\u054e\u0001\u0000\u0000\u0000\u0169\u0552\u0001\u0000\u0000"+ - "\u0000\u016b\u0556\u0001\u0000\u0000\u0000\u016d\u055a\u0001\u0000\u0000"+ - "\u0000\u016f\u055e\u0001\u0000\u0000\u0000\u0171\u0562\u0001\u0000\u0000"+ - "\u0000\u0173\u0568\u0001\u0000\u0000\u0000\u0175\u056c\u0001\u0000\u0000"+ - "\u0000\u0177\u0570\u0001\u0000\u0000\u0000\u0179\u0574\u0001\u0000\u0000"+ - "\u0000\u017b\u0578\u0001\u0000\u0000\u0000\u017d\u057c\u0001\u0000\u0000"+ - "\u0000\u017f\u0580\u0001\u0000\u0000\u0000\u0181\u0585\u0001\u0000\u0000"+ - "\u0000\u0183\u058b\u0001\u0000\u0000\u0000\u0185\u0591\u0001\u0000\u0000"+ - "\u0000\u0187\u0595\u0001\u0000\u0000\u0000\u0189\u0599\u0001\u0000\u0000"+ - 
"\u0000\u018b\u059d\u0001\u0000\u0000\u0000\u018d\u05a3\u0001\u0000\u0000"+ - "\u0000\u018f\u05a9\u0001\u0000\u0000\u0000\u0191\u05ad\u0001\u0000\u0000"+ - "\u0000\u0193\u05b1\u0001\u0000\u0000\u0000\u0195\u05b5\u0001\u0000\u0000"+ - "\u0000\u0197\u05bb\u0001\u0000\u0000\u0000\u0199\u05c1\u0001\u0000\u0000"+ - "\u0000\u019b\u05c7\u0001\u0000\u0000\u0000\u019d\u019e\u0007\u0000\u0000"+ - "\u0000\u019e\u019f\u0007\u0001\u0000\u0000\u019f\u01a0\u0007\u0002\u0000"+ - "\u0000\u01a0\u01a1\u0007\u0002\u0000\u0000\u01a1\u01a2\u0007\u0003\u0000"+ - "\u0000\u01a2\u01a3\u0007\u0004\u0000\u0000\u01a3\u01a4\u0007\u0005\u0000"+ - "\u0000\u01a4\u01a5\u0001\u0000\u0000\u0000\u01a5\u01a6\u0006\u0000\u0000"+ - "\u0000\u01a6\u0010\u0001\u0000\u0000\u0000\u01a7\u01a8\u0007\u0000\u0000"+ - "\u0000\u01a8\u01a9\u0007\u0006\u0000\u0000\u01a9\u01aa\u0007\u0007\u0000"+ - "\u0000\u01aa\u01ab\u0007\b\u0000\u0000\u01ab\u01ac\u0001\u0000\u0000\u0000"+ - "\u01ac\u01ad\u0006\u0001\u0001\u0000\u01ad\u0012\u0001\u0000\u0000\u0000"+ - "\u01ae\u01af\u0007\u0003\u0000\u0000\u01af\u01b0\u0007\t\u0000\u0000\u01b0"+ - "\u01b1\u0007\u0006\u0000\u0000\u01b1\u01b2\u0007\u0001\u0000\u0000\u01b2"+ - "\u01b3\u0007\u0004\u0000\u0000\u01b3\u01b4\u0007\n\u0000\u0000\u01b4\u01b5"+ - "\u0001\u0000\u0000\u0000\u01b5\u01b6\u0006\u0002\u0002\u0000\u01b6\u0014"+ - "\u0001\u0000\u0000\u0000\u01b7\u01b8\u0007\u0003\u0000\u0000\u01b8\u01b9"+ - "\u0007\u000b\u0000\u0000\u01b9\u01ba\u0007\f\u0000\u0000\u01ba\u01bb\u0007"+ - "\r\u0000\u0000\u01bb\u01bc\u0001\u0000\u0000\u0000\u01bc\u01bd\u0006\u0003"+ - "\u0000\u0000\u01bd\u0016\u0001\u0000\u0000\u0000\u01be\u01bf\u0007\u0003"+ - "\u0000\u0000\u01bf\u01c0\u0007\u000e\u0000\u0000\u01c0\u01c1\u0007\b\u0000"+ - "\u0000\u01c1\u01c2\u0007\r\u0000\u0000\u01c2\u01c3\u0007\f\u0000\u0000"+ - "\u01c3\u01c4\u0007\u0001\u0000\u0000\u01c4\u01c5\u0007\t\u0000\u0000\u01c5"+ - "\u01c6\u0001\u0000\u0000\u0000\u01c6\u01c7\u0006\u0004\u0003\u0000\u01c7"+ - "\u0018\u0001\u0000\u0000\u0000\u01c8\u01c9\u0007\u000f\u0000\u0000\u01c9"+ - "\u01ca\u0007\u0006\u0000\u0000\u01ca\u01cb\u0007\u0007\u0000\u0000\u01cb"+ - "\u01cc\u0007\u0010\u0000\u0000\u01cc\u01cd\u0001\u0000\u0000\u0000\u01cd"+ - "\u01ce\u0006\u0005\u0004\u0000\u01ce\u001a\u0001\u0000\u0000\u0000\u01cf"+ - "\u01d0\u0007\u0011\u0000\u0000\u01d0\u01d1\u0007\u0006\u0000\u0000\u01d1"+ - "\u01d2\u0007\u0007\u0000\u0000\u01d2\u01d3\u0007\u0012\u0000\u0000\u01d3"+ - "\u01d4\u0001\u0000\u0000\u0000\u01d4\u01d5\u0006\u0006\u0000\u0000\u01d5"+ - "\u001c\u0001\u0000\u0000\u0000\u01d6\u01d7\u0007\u0012\u0000\u0000\u01d7"+ - "\u01d8\u0007\u0003\u0000\u0000\u01d8\u01d9\u0007\u0003\u0000\u0000\u01d9"+ - "\u01da\u0007\b\u0000\u0000\u01da\u01db\u0001\u0000\u0000\u0000\u01db\u01dc"+ - "\u0006\u0007\u0001\u0000\u01dc\u001e\u0001\u0000\u0000\u0000\u01dd\u01de"+ - "\u0007\r\u0000\u0000\u01de\u01df\u0007\u0001\u0000\u0000\u01df\u01e0\u0007"+ - "\u0010\u0000\u0000\u01e0\u01e1\u0007\u0001\u0000\u0000\u01e1\u01e2\u0007"+ - "\u0005\u0000\u0000\u01e2\u01e3\u0001\u0000\u0000\u0000\u01e3\u01e4\u0006"+ - "\b\u0000\u0000\u01e4 \u0001\u0000\u0000\u0000\u01e5\u01e6\u0007\u0010"+ - "\u0000\u0000\u01e6\u01e7\u0007\u000b\u0000\u0000\u01e7\u01e8\u0005_\u0000"+ - "\u0000\u01e8\u01e9\u0007\u0003\u0000\u0000\u01e9\u01ea\u0007\u000e\u0000"+ - "\u0000\u01ea\u01eb\u0007\b\u0000\u0000\u01eb\u01ec\u0007\f\u0000\u0000"+ - "\u01ec\u01ed\u0007\t\u0000\u0000\u01ed\u01ee\u0007\u0000\u0000\u0000\u01ee"+ - "\u01ef\u0001\u0000\u0000\u0000\u01ef\u01f0\u0006\t\u0005\u0000\u01f0\""+ - 
"\u0001\u0000\u0000\u0000\u01f1\u01f2\u0007\u0006\u0000\u0000\u01f2\u01f3"+ - "\u0007\u0003\u0000\u0000\u01f3\u01f4\u0007\t\u0000\u0000\u01f4\u01f5\u0007"+ - "\f\u0000\u0000\u01f5\u01f6\u0007\u0010\u0000\u0000\u01f6\u01f7\u0007\u0003"+ - "\u0000\u0000\u01f7\u01f8\u0001\u0000\u0000\u0000\u01f8\u01f9\u0006\n\u0006"+ - "\u0000\u01f9$\u0001\u0000\u0000\u0000\u01fa\u01fb\u0007\u0006\u0000\u0000"+ - "\u01fb\u01fc\u0007\u0007\u0000\u0000\u01fc\u01fd\u0007\u0013\u0000\u0000"+ - "\u01fd\u01fe\u0001\u0000\u0000\u0000\u01fe\u01ff\u0006\u000b\u0000\u0000"+ - "\u01ff&\u0001\u0000\u0000\u0000\u0200\u0201\u0007\u0002\u0000\u0000\u0201"+ - "\u0202\u0007\n\u0000\u0000\u0202\u0203\u0007\u0007\u0000\u0000\u0203\u0204"+ - "\u0007\u0013\u0000\u0000\u0204\u0205\u0001\u0000\u0000\u0000\u0205\u0206"+ - "\u0006\f\u0007\u0000\u0206(\u0001\u0000\u0000\u0000\u0207\u0208\u0007"+ - "\u0002\u0000\u0000\u0208\u0209\u0007\u0007\u0000\u0000\u0209\u020a\u0007"+ - "\u0006\u0000\u0000\u020a\u020b\u0007\u0005\u0000\u0000\u020b\u020c\u0001"+ - "\u0000\u0000\u0000\u020c\u020d\u0006\r\u0000\u0000\u020d*\u0001\u0000"+ - "\u0000\u0000\u020e\u020f\u0007\u0002\u0000\u0000\u020f\u0210\u0007\u0005"+ - "\u0000\u0000\u0210\u0211\u0007\f\u0000\u0000\u0211\u0212\u0007\u0005\u0000"+ - "\u0000\u0212\u0213\u0007\u0002\u0000\u0000\u0213\u0214\u0001\u0000\u0000"+ - "\u0000\u0214\u0215\u0006\u000e\u0000\u0000\u0215,\u0001\u0000\u0000\u0000"+ - "\u0216\u0217\u0007\u0013\u0000\u0000\u0217\u0218\u0007\n\u0000\u0000\u0218"+ - "\u0219\u0007\u0003\u0000\u0000\u0219\u021a\u0007\u0006\u0000\u0000\u021a"+ - "\u021b\u0007\u0003\u0000\u0000\u021b\u021c\u0001\u0000\u0000\u0000\u021c"+ - "\u021d\u0006\u000f\u0000\u0000\u021d.\u0001\u0000\u0000\u0000\u021e\u021f"+ - "\u0004\u0010\u0000\u0000\u021f\u0220\u0007\u0001\u0000\u0000\u0220\u0221"+ - "\u0007\t\u0000\u0000\u0221\u0222\u0007\r\u0000\u0000\u0222\u0223\u0007"+ - "\u0001\u0000\u0000\u0223\u0224\u0007\t\u0000\u0000\u0224\u0225\u0007\u0003"+ - "\u0000\u0000\u0225\u0226\u0007\u0002\u0000\u0000\u0226\u0227\u0007\u0005"+ - "\u0000\u0000\u0227\u0228\u0007\f\u0000\u0000\u0228\u0229\u0007\u0005\u0000"+ - "\u0000\u0229\u022a\u0007\u0002\u0000\u0000\u022a\u022b\u0001\u0000\u0000"+ - "\u0000\u022b\u022c\u0006\u0010\u0000\u0000\u022c0\u0001\u0000\u0000\u0000"+ - "\u022d\u022e\u0004\u0011\u0001\u0000\u022e\u022f\u0007\r\u0000\u0000\u022f"+ - "\u0230\u0007\u0007\u0000\u0000\u0230\u0231\u0007\u0007\u0000\u0000\u0231"+ - "\u0232\u0007\u0012\u0000\u0000\u0232\u0233\u0007\u0014\u0000\u0000\u0233"+ - "\u0234\u0007\b\u0000\u0000\u0234\u0235\u0001\u0000\u0000\u0000\u0235\u0236"+ - "\u0006\u0011\b\u0000\u02362\u0001\u0000\u0000\u0000\u0237\u0238\u0004"+ - "\u0012\u0002\u0000\u0238\u0239\u0007\u0010\u0000\u0000\u0239\u023a\u0007"+ - "\u0003\u0000\u0000\u023a\u023b\u0007\u0005\u0000\u0000\u023b\u023c\u0007"+ - "\u0006\u0000\u0000\u023c\u023d\u0007\u0001\u0000\u0000\u023d\u023e\u0007"+ - "\u0004\u0000\u0000\u023e\u023f\u0007\u0002\u0000\u0000\u023f\u0240\u0001"+ - "\u0000\u0000\u0000\u0240\u0241\u0006\u0012\t\u0000\u02414\u0001\u0000"+ - "\u0000\u0000\u0242\u0244\b\u0015\u0000\u0000\u0243\u0242\u0001\u0000\u0000"+ - "\u0000\u0244\u0245\u0001\u0000\u0000\u0000\u0245\u0243\u0001\u0000\u0000"+ - "\u0000\u0245\u0246\u0001\u0000\u0000\u0000\u0246\u0247\u0001\u0000\u0000"+ - "\u0000\u0247\u0248\u0006\u0013\u0000\u0000\u02486\u0001\u0000\u0000\u0000"+ - "\u0249\u024a\u0005/\u0000\u0000\u024a\u024b\u0005/\u0000\u0000\u024b\u024f"+ - "\u0001\u0000\u0000\u0000\u024c\u024e\b\u0016\u0000\u0000\u024d\u024c\u0001"+ - 
"\u0000\u0000\u0000\u024e\u0251\u0001\u0000\u0000\u0000\u024f\u024d\u0001"+ - "\u0000\u0000\u0000\u024f\u0250\u0001\u0000\u0000\u0000\u0250\u0253\u0001"+ - "\u0000\u0000\u0000\u0251\u024f\u0001\u0000\u0000\u0000\u0252\u0254\u0005"+ - "\r\u0000\u0000\u0253\u0252\u0001\u0000\u0000\u0000\u0253\u0254\u0001\u0000"+ - "\u0000\u0000\u0254\u0256\u0001\u0000\u0000\u0000\u0255\u0257\u0005\n\u0000"+ - "\u0000\u0256\u0255\u0001\u0000\u0000\u0000\u0256\u0257\u0001\u0000\u0000"+ - "\u0000\u0257\u0258\u0001\u0000\u0000\u0000\u0258\u0259\u0006\u0014\n\u0000"+ - "\u02598\u0001\u0000\u0000\u0000\u025a\u025b\u0005/\u0000\u0000\u025b\u025c"+ - "\u0005*\u0000\u0000\u025c\u0261\u0001\u0000\u0000\u0000\u025d\u0260\u0003"+ - "9\u0015\u0000\u025e\u0260\t\u0000\u0000\u0000\u025f\u025d\u0001\u0000"+ - "\u0000\u0000\u025f\u025e\u0001\u0000\u0000\u0000\u0260\u0263\u0001\u0000"+ - "\u0000\u0000\u0261\u0262\u0001\u0000\u0000\u0000\u0261\u025f\u0001\u0000"+ - "\u0000\u0000\u0262\u0264\u0001\u0000\u0000\u0000\u0263\u0261\u0001\u0000"+ - "\u0000\u0000\u0264\u0265\u0005*\u0000\u0000\u0265\u0266\u0005/\u0000\u0000"+ - "\u0266\u0267\u0001\u0000\u0000\u0000\u0267\u0268\u0006\u0015\n\u0000\u0268"+ - ":\u0001\u0000\u0000\u0000\u0269\u026b\u0007\u0017\u0000\u0000\u026a\u0269"+ - "\u0001\u0000\u0000\u0000\u026b\u026c\u0001\u0000\u0000\u0000\u026c\u026a"+ - "\u0001\u0000\u0000\u0000\u026c\u026d\u0001\u0000\u0000\u0000\u026d\u026e"+ - "\u0001\u0000\u0000\u0000\u026e\u026f\u0006\u0016\n\u0000\u026f<\u0001"+ - "\u0000\u0000\u0000\u0270\u0271\u0005:\u0000\u0000\u0271>\u0001\u0000\u0000"+ - "\u0000\u0272\u0273\u0005|\u0000\u0000\u0273\u0274\u0001\u0000\u0000\u0000"+ - "\u0274\u0275\u0006\u0018\u000b\u0000\u0275@\u0001\u0000\u0000\u0000\u0276"+ - "\u0277\u0007\u0018\u0000\u0000\u0277B\u0001\u0000\u0000\u0000\u0278\u0279"+ - "\u0007\u0019\u0000\u0000\u0279D\u0001\u0000\u0000\u0000\u027a\u027b\u0005"+ - "\\\u0000\u0000\u027b\u027c\u0007\u001a\u0000\u0000\u027cF\u0001\u0000"+ - "\u0000\u0000\u027d\u027e\b\u001b\u0000\u0000\u027eH\u0001\u0000\u0000"+ - "\u0000\u027f\u0281\u0007\u0003\u0000\u0000\u0280\u0282\u0007\u001c\u0000"+ - "\u0000\u0281\u0280\u0001\u0000\u0000\u0000\u0281\u0282\u0001\u0000\u0000"+ - "\u0000\u0282\u0284\u0001\u0000\u0000\u0000\u0283\u0285\u0003A\u0019\u0000"+ - "\u0284\u0283\u0001\u0000\u0000\u0000\u0285\u0286\u0001\u0000\u0000\u0000"+ - "\u0286\u0284\u0001\u0000\u0000\u0000\u0286\u0287\u0001\u0000\u0000\u0000"+ - "\u0287J\u0001\u0000\u0000\u0000\u0288\u0289\u0005@\u0000\u0000\u0289L"+ - "\u0001\u0000\u0000\u0000\u028a\u028b\u0005`\u0000\u0000\u028bN\u0001\u0000"+ - "\u0000\u0000\u028c\u0290\b\u001d\u0000\u0000\u028d\u028e\u0005`\u0000"+ - "\u0000\u028e\u0290\u0005`\u0000\u0000\u028f\u028c\u0001\u0000\u0000\u0000"+ - "\u028f\u028d\u0001\u0000\u0000\u0000\u0290P\u0001\u0000\u0000\u0000\u0291"+ - "\u0292\u0005_\u0000\u0000\u0292R\u0001\u0000\u0000\u0000\u0293\u0297\u0003"+ - "C\u001a\u0000\u0294\u0297\u0003A\u0019\u0000\u0295\u0297\u0003Q!\u0000"+ - "\u0296\u0293\u0001\u0000\u0000\u0000\u0296\u0294\u0001\u0000\u0000\u0000"+ - "\u0296\u0295\u0001\u0000\u0000\u0000\u0297T\u0001\u0000\u0000\u0000\u0298"+ - "\u029d\u0005\"\u0000\u0000\u0299\u029c\u0003E\u001b\u0000\u029a\u029c"+ - "\u0003G\u001c\u0000\u029b\u0299\u0001\u0000\u0000\u0000\u029b\u029a\u0001"+ - "\u0000\u0000\u0000\u029c\u029f\u0001\u0000\u0000\u0000\u029d\u029b\u0001"+ - "\u0000\u0000\u0000\u029d\u029e\u0001\u0000\u0000\u0000\u029e\u02a0\u0001"+ - "\u0000\u0000\u0000\u029f\u029d\u0001\u0000\u0000\u0000\u02a0\u02b6\u0005"+ - 
"\"\u0000\u0000\u02a1\u02a2\u0005\"\u0000\u0000\u02a2\u02a3\u0005\"\u0000"+ - "\u0000\u02a3\u02a4\u0005\"\u0000\u0000\u02a4\u02a8\u0001\u0000\u0000\u0000"+ - "\u02a5\u02a7\b\u0016\u0000\u0000\u02a6\u02a5\u0001\u0000\u0000\u0000\u02a7"+ - "\u02aa\u0001\u0000\u0000\u0000\u02a8\u02a9\u0001\u0000\u0000\u0000\u02a8"+ - "\u02a6\u0001\u0000\u0000\u0000\u02a9\u02ab\u0001\u0000\u0000\u0000\u02aa"+ - "\u02a8\u0001\u0000\u0000\u0000\u02ab\u02ac\u0005\"\u0000\u0000\u02ac\u02ad"+ - "\u0005\"\u0000\u0000\u02ad\u02ae\u0005\"\u0000\u0000\u02ae\u02b0\u0001"+ - "\u0000\u0000\u0000\u02af\u02b1\u0005\"\u0000\u0000\u02b0\u02af\u0001\u0000"+ - "\u0000\u0000\u02b0\u02b1\u0001\u0000\u0000\u0000\u02b1\u02b3\u0001\u0000"+ - "\u0000\u0000\u02b2\u02b4\u0005\"\u0000\u0000\u02b3\u02b2\u0001\u0000\u0000"+ - "\u0000\u02b3\u02b4\u0001\u0000\u0000\u0000\u02b4\u02b6\u0001\u0000\u0000"+ - "\u0000\u02b5\u0298\u0001\u0000\u0000\u0000\u02b5\u02a1\u0001\u0000\u0000"+ - "\u0000\u02b6V\u0001\u0000\u0000\u0000\u02b7\u02b9\u0003A\u0019\u0000\u02b8"+ - "\u02b7\u0001\u0000\u0000\u0000\u02b9\u02ba\u0001\u0000\u0000\u0000\u02ba"+ - "\u02b8\u0001\u0000\u0000\u0000\u02ba\u02bb\u0001\u0000\u0000\u0000\u02bb"+ - "X\u0001\u0000\u0000\u0000\u02bc\u02be\u0003A\u0019\u0000\u02bd\u02bc\u0001"+ - "\u0000\u0000\u0000\u02be\u02bf\u0001\u0000\u0000\u0000\u02bf\u02bd\u0001"+ - "\u0000\u0000\u0000\u02bf\u02c0\u0001\u0000\u0000\u0000\u02c0\u02c1\u0001"+ - "\u0000\u0000\u0000\u02c1\u02c5\u0003i-\u0000\u02c2\u02c4\u0003A\u0019"+ - "\u0000\u02c3\u02c2\u0001\u0000\u0000\u0000\u02c4\u02c7\u0001\u0000\u0000"+ - "\u0000\u02c5\u02c3\u0001\u0000\u0000\u0000\u02c5\u02c6\u0001\u0000\u0000"+ - "\u0000\u02c6\u02e7\u0001\u0000\u0000\u0000\u02c7\u02c5\u0001\u0000\u0000"+ - "\u0000\u02c8\u02ca\u0003i-\u0000\u02c9\u02cb\u0003A\u0019\u0000\u02ca"+ - "\u02c9\u0001\u0000\u0000\u0000\u02cb\u02cc\u0001\u0000\u0000\u0000\u02cc"+ - "\u02ca\u0001\u0000\u0000\u0000\u02cc\u02cd\u0001\u0000\u0000\u0000\u02cd"+ - "\u02e7\u0001\u0000\u0000\u0000\u02ce\u02d0\u0003A\u0019\u0000\u02cf\u02ce"+ - "\u0001\u0000\u0000\u0000\u02d0\u02d1\u0001\u0000\u0000\u0000\u02d1\u02cf"+ - "\u0001\u0000\u0000\u0000\u02d1\u02d2\u0001\u0000\u0000\u0000\u02d2\u02da"+ - "\u0001\u0000\u0000\u0000\u02d3\u02d7\u0003i-\u0000\u02d4\u02d6\u0003A"+ - "\u0019\u0000\u02d5\u02d4\u0001\u0000\u0000\u0000\u02d6\u02d9\u0001\u0000"+ - "\u0000\u0000\u02d7\u02d5\u0001\u0000\u0000\u0000\u02d7\u02d8\u0001\u0000"+ - "\u0000\u0000\u02d8\u02db\u0001\u0000\u0000\u0000\u02d9\u02d7\u0001\u0000"+ - "\u0000\u0000\u02da\u02d3\u0001\u0000\u0000\u0000\u02da\u02db\u0001\u0000"+ - "\u0000\u0000\u02db\u02dc\u0001\u0000\u0000\u0000\u02dc\u02dd\u0003I\u001d"+ - "\u0000\u02dd\u02e7\u0001\u0000\u0000\u0000\u02de\u02e0\u0003i-\u0000\u02df"+ - "\u02e1\u0003A\u0019\u0000\u02e0\u02df\u0001\u0000\u0000\u0000\u02e1\u02e2"+ - "\u0001\u0000\u0000\u0000\u02e2\u02e0\u0001\u0000\u0000\u0000\u02e2\u02e3"+ - "\u0001\u0000\u0000\u0000\u02e3\u02e4\u0001\u0000\u0000\u0000\u02e4\u02e5"+ - "\u0003I\u001d\u0000\u02e5\u02e7\u0001\u0000\u0000\u0000\u02e6\u02bd\u0001"+ - "\u0000\u0000\u0000\u02e6\u02c8\u0001\u0000\u0000\u0000\u02e6\u02cf\u0001"+ - "\u0000\u0000\u0000\u02e6\u02de\u0001\u0000\u0000\u0000\u02e7Z\u0001\u0000"+ - "\u0000\u0000\u02e8\u02e9\u0007\u001e\u0000\u0000\u02e9\u02ea\u0007\u001f"+ - "\u0000\u0000\u02ea\\\u0001\u0000\u0000\u0000\u02eb\u02ec\u0007\f\u0000"+ - "\u0000\u02ec\u02ed\u0007\t\u0000\u0000\u02ed\u02ee\u0007\u0000\u0000\u0000"+ - "\u02ee^\u0001\u0000\u0000\u0000\u02ef\u02f0\u0007\f\u0000\u0000\u02f0"+ - 
"\u02f1\u0007\u0002\u0000\u0000\u02f1\u02f2\u0007\u0004\u0000\u0000\u02f2"+ - "`\u0001\u0000\u0000\u0000\u02f3\u02f4\u0005=\u0000\u0000\u02f4b\u0001"+ - "\u0000\u0000\u0000\u02f5\u02f6\u0005:\u0000\u0000\u02f6\u02f7\u0005:\u0000"+ - "\u0000\u02f7d\u0001\u0000\u0000\u0000\u02f8\u02f9\u0005,\u0000\u0000\u02f9"+ - "f\u0001\u0000\u0000\u0000\u02fa\u02fb\u0007\u0000\u0000\u0000\u02fb\u02fc"+ - "\u0007\u0003\u0000\u0000\u02fc\u02fd\u0007\u0002\u0000\u0000\u02fd\u02fe"+ - "\u0007\u0004\u0000\u0000\u02feh\u0001\u0000\u0000\u0000\u02ff\u0300\u0005"+ - ".\u0000\u0000\u0300j\u0001\u0000\u0000\u0000\u0301\u0302\u0007\u000f\u0000"+ - "\u0000\u0302\u0303\u0007\f\u0000\u0000\u0303\u0304\u0007\r\u0000\u0000"+ - "\u0304\u0305\u0007\u0002\u0000\u0000\u0305\u0306\u0007\u0003\u0000\u0000"+ - "\u0306l\u0001\u0000\u0000\u0000\u0307\u0308\u0007\u000f\u0000\u0000\u0308"+ - "\u0309\u0007\u0001\u0000\u0000\u0309\u030a\u0007\u0006\u0000\u0000\u030a"+ - "\u030b\u0007\u0002\u0000\u0000\u030b\u030c\u0007\u0005\u0000\u0000\u030c"+ - "n\u0001\u0000\u0000\u0000\u030d\u030e\u0007\u0001\u0000\u0000\u030e\u030f"+ - "\u0007\t\u0000\u0000\u030fp\u0001\u0000\u0000\u0000\u0310\u0311\u0007"+ - "\u0001\u0000\u0000\u0311\u0312\u0007\u0002\u0000\u0000\u0312r\u0001\u0000"+ - "\u0000\u0000\u0313\u0314\u0007\r\u0000\u0000\u0314\u0315\u0007\f\u0000"+ - "\u0000\u0315\u0316\u0007\u0002\u0000\u0000\u0316\u0317\u0007\u0005\u0000"+ - "\u0000\u0317t\u0001\u0000\u0000\u0000\u0318\u0319\u0007\r\u0000\u0000"+ - "\u0319\u031a\u0007\u0001\u0000\u0000\u031a\u031b\u0007\u0012\u0000\u0000"+ - "\u031b\u031c\u0007\u0003\u0000\u0000\u031cv\u0001\u0000\u0000\u0000\u031d"+ - "\u031e\u0005(\u0000\u0000\u031ex\u0001\u0000\u0000\u0000\u031f\u0320\u0007"+ - "\t\u0000\u0000\u0320\u0321\u0007\u0007\u0000\u0000\u0321\u0322\u0007\u0005"+ - "\u0000\u0000\u0322z\u0001\u0000\u0000\u0000\u0323\u0324\u0007\t\u0000"+ - "\u0000\u0324\u0325\u0007\u0014\u0000\u0000\u0325\u0326\u0007\r\u0000\u0000"+ - "\u0326\u0327\u0007\r\u0000\u0000\u0327|\u0001\u0000\u0000\u0000\u0328"+ - "\u0329\u0007\t\u0000\u0000\u0329\u032a\u0007\u0014\u0000\u0000\u032a\u032b"+ - "\u0007\r\u0000\u0000\u032b\u032c\u0007\r\u0000\u0000\u032c\u032d\u0007"+ - "\u0002\u0000\u0000\u032d~\u0001\u0000\u0000\u0000\u032e\u032f\u0007\u0007"+ - "\u0000\u0000\u032f\u0330\u0007\u0006\u0000\u0000\u0330\u0080\u0001\u0000"+ - "\u0000\u0000\u0331\u0332\u0005?\u0000\u0000\u0332\u0082\u0001\u0000\u0000"+ - "\u0000\u0333\u0334\u0007\u0006\u0000\u0000\u0334\u0335\u0007\r\u0000\u0000"+ - "\u0335\u0336\u0007\u0001\u0000\u0000\u0336\u0337\u0007\u0012\u0000\u0000"+ - "\u0337\u0338\u0007\u0003\u0000\u0000\u0338\u0084\u0001\u0000\u0000\u0000"+ - "\u0339\u033a\u0005)\u0000\u0000\u033a\u0086\u0001\u0000\u0000\u0000\u033b"+ - "\u033c\u0007\u0005\u0000\u0000\u033c\u033d\u0007\u0006\u0000\u0000\u033d"+ - "\u033e\u0007\u0014\u0000\u0000\u033e\u033f\u0007\u0003\u0000\u0000\u033f"+ - "\u0088\u0001\u0000\u0000\u0000\u0340\u0341\u0005=\u0000\u0000\u0341\u0342"+ - "\u0005=\u0000\u0000\u0342\u008a\u0001\u0000\u0000\u0000\u0343\u0344\u0005"+ - "=\u0000\u0000\u0344\u0345\u0005~\u0000\u0000\u0345\u008c\u0001\u0000\u0000"+ - "\u0000\u0346\u0347\u0005!\u0000\u0000\u0347\u0348\u0005=\u0000\u0000\u0348"+ - "\u008e\u0001\u0000\u0000\u0000\u0349\u034a\u0005<\u0000\u0000\u034a\u0090"+ - "\u0001\u0000\u0000\u0000\u034b\u034c\u0005<\u0000\u0000\u034c\u034d\u0005"+ - "=\u0000\u0000\u034d\u0092\u0001\u0000\u0000\u0000\u034e\u034f\u0005>\u0000"+ - "\u0000\u034f\u0094\u0001\u0000\u0000\u0000\u0350\u0351\u0005>\u0000\u0000"+ - 
"\u0351\u0352\u0005=\u0000\u0000\u0352\u0096\u0001\u0000\u0000\u0000\u0353"+ - "\u0354\u0005+\u0000\u0000\u0354\u0098\u0001\u0000\u0000\u0000\u0355\u0356"+ - "\u0005-\u0000\u0000\u0356\u009a\u0001\u0000\u0000\u0000\u0357\u0358\u0005"+ - "*\u0000\u0000\u0358\u009c\u0001\u0000\u0000\u0000\u0359\u035a\u0005/\u0000"+ - "\u0000\u035a\u009e\u0001\u0000\u0000\u0000\u035b\u035c\u0005%\u0000\u0000"+ - "\u035c\u00a0\u0001\u0000\u0000\u0000\u035d\u035e\u0004I\u0003\u0000\u035e"+ - "\u035f\u0003=\u0017\u0000\u035f\u0360\u0001\u0000\u0000\u0000\u0360\u0361"+ - "\u0006I\f\u0000\u0361\u00a2\u0001\u0000\u0000\u0000\u0362\u0363\u0003"+ - "-\u000f\u0000\u0363\u0364\u0001\u0000\u0000\u0000\u0364\u0365\u0006J\r"+ - "\u0000\u0365\u00a4\u0001\u0000\u0000\u0000\u0366\u0369\u0003\u00819\u0000"+ - "\u0367\u036a\u0003C\u001a\u0000\u0368\u036a\u0003Q!\u0000\u0369\u0367"+ - "\u0001\u0000\u0000\u0000\u0369\u0368\u0001\u0000\u0000\u0000\u036a\u036e"+ - "\u0001\u0000\u0000\u0000\u036b\u036d\u0003S\"\u0000\u036c\u036b\u0001"+ - "\u0000\u0000\u0000\u036d\u0370\u0001\u0000\u0000\u0000\u036e\u036c\u0001"+ - "\u0000\u0000\u0000\u036e\u036f\u0001\u0000\u0000\u0000\u036f\u0378\u0001"+ - "\u0000\u0000\u0000\u0370\u036e\u0001\u0000\u0000\u0000\u0371\u0373\u0003"+ - "\u00819\u0000\u0372\u0374\u0003A\u0019\u0000\u0373\u0372\u0001\u0000\u0000"+ - "\u0000\u0374\u0375\u0001\u0000\u0000\u0000\u0375\u0373\u0001\u0000\u0000"+ - "\u0000\u0375\u0376\u0001\u0000\u0000\u0000\u0376\u0378\u0001\u0000\u0000"+ - "\u0000\u0377\u0366\u0001\u0000\u0000\u0000\u0377\u0371\u0001\u0000\u0000"+ - "\u0000\u0378\u00a6\u0001\u0000\u0000\u0000\u0379\u037a\u0005[\u0000\u0000"+ - "\u037a\u037b\u0001\u0000\u0000\u0000\u037b\u037c\u0006L\u0000\u0000\u037c"+ - "\u037d\u0006L\u0000\u0000\u037d\u00a8\u0001\u0000\u0000\u0000\u037e\u037f"+ - "\u0005]\u0000\u0000\u037f\u0380\u0001\u0000\u0000\u0000\u0380\u0381\u0006"+ - "M\u000b\u0000\u0381\u0382\u0006M\u000b\u0000\u0382\u00aa\u0001\u0000\u0000"+ - "\u0000\u0383\u0387\u0003C\u001a\u0000\u0384\u0386\u0003S\"\u0000\u0385"+ - "\u0384\u0001\u0000\u0000\u0000\u0386\u0389\u0001\u0000\u0000\u0000\u0387"+ - "\u0385\u0001\u0000\u0000\u0000\u0387\u0388\u0001\u0000\u0000\u0000\u0388"+ - "\u0394\u0001\u0000\u0000\u0000\u0389\u0387\u0001\u0000\u0000\u0000\u038a"+ - "\u038d\u0003Q!\u0000\u038b\u038d\u0003K\u001e\u0000\u038c\u038a\u0001"+ - "\u0000\u0000\u0000\u038c\u038b\u0001\u0000\u0000\u0000\u038d\u038f\u0001"+ - "\u0000\u0000\u0000\u038e\u0390\u0003S\"\u0000\u038f\u038e\u0001\u0000"+ - "\u0000\u0000\u0390\u0391\u0001\u0000\u0000\u0000\u0391\u038f\u0001\u0000"+ - "\u0000\u0000\u0391\u0392\u0001\u0000\u0000\u0000\u0392\u0394\u0001\u0000"+ - "\u0000\u0000\u0393\u0383\u0001\u0000\u0000\u0000\u0393\u038c\u0001\u0000"+ - "\u0000\u0000\u0394\u00ac\u0001\u0000\u0000\u0000\u0395\u0397\u0003M\u001f"+ - "\u0000\u0396\u0398\u0003O \u0000\u0397\u0396\u0001\u0000\u0000\u0000\u0398"+ - "\u0399\u0001\u0000\u0000\u0000\u0399\u0397\u0001\u0000\u0000\u0000\u0399"+ - "\u039a\u0001\u0000\u0000\u0000\u039a\u039b\u0001\u0000\u0000\u0000\u039b"+ - "\u039c\u0003M\u001f\u0000\u039c\u00ae\u0001\u0000\u0000\u0000\u039d\u039e"+ - "\u0003\u00adO\u0000\u039e\u00b0\u0001\u0000\u0000\u0000\u039f\u03a0\u0003"+ - "7\u0014\u0000\u03a0\u03a1\u0001\u0000\u0000\u0000\u03a1\u03a2\u0006Q\n"+ - "\u0000\u03a2\u00b2\u0001\u0000\u0000\u0000\u03a3\u03a4\u00039\u0015\u0000"+ - "\u03a4\u03a5\u0001\u0000\u0000\u0000\u03a5\u03a6\u0006R\n\u0000\u03a6"+ - "\u00b4\u0001\u0000\u0000\u0000\u03a7\u03a8\u0003;\u0016\u0000\u03a8\u03a9"+ - 
"\u0001\u0000\u0000\u0000\u03a9\u03aa\u0006S\n\u0000\u03aa\u00b6\u0001"+ - "\u0000\u0000\u0000\u03ab\u03ac\u0003\u00a7L\u0000\u03ac\u03ad\u0001\u0000"+ - "\u0000\u0000\u03ad\u03ae\u0006T\u000e\u0000\u03ae\u03af\u0006T\u000f\u0000"+ - "\u03af\u00b8\u0001\u0000\u0000\u0000\u03b0\u03b1\u0003?\u0018\u0000\u03b1"+ - "\u03b2\u0001\u0000\u0000\u0000\u03b2\u03b3\u0006U\u0010\u0000\u03b3\u03b4"+ - "\u0006U\u000b\u0000\u03b4\u00ba\u0001\u0000\u0000\u0000\u03b5\u03b6\u0003"+ - ";\u0016\u0000\u03b6\u03b7\u0001\u0000\u0000\u0000\u03b7\u03b8\u0006V\n"+ - "\u0000\u03b8\u00bc\u0001\u0000\u0000\u0000\u03b9\u03ba\u00037\u0014\u0000"+ - "\u03ba\u03bb\u0001\u0000\u0000\u0000\u03bb\u03bc\u0006W\n\u0000\u03bc"+ - "\u00be\u0001\u0000\u0000\u0000\u03bd\u03be\u00039\u0015\u0000\u03be\u03bf"+ - "\u0001\u0000\u0000\u0000\u03bf\u03c0\u0006X\n\u0000\u03c0\u00c0\u0001"+ - "\u0000\u0000\u0000\u03c1\u03c2\u0003?\u0018\u0000\u03c2\u03c3\u0001\u0000"+ - "\u0000\u0000\u03c3\u03c4\u0006Y\u0010\u0000\u03c4\u03c5\u0006Y\u000b\u0000"+ - "\u03c5\u00c2\u0001\u0000\u0000\u0000\u03c6\u03c7\u0003\u00a7L\u0000\u03c7"+ - "\u03c8\u0001\u0000\u0000\u0000\u03c8\u03c9\u0006Z\u000e\u0000\u03c9\u00c4"+ - "\u0001\u0000\u0000\u0000\u03ca\u03cb\u0003\u00a9M\u0000\u03cb\u03cc\u0001"+ - "\u0000\u0000\u0000\u03cc\u03cd\u0006[\u0011\u0000\u03cd\u00c6\u0001\u0000"+ - "\u0000\u0000\u03ce\u03cf\u0003=\u0017\u0000\u03cf\u03d0\u0001\u0000\u0000"+ - "\u0000\u03d0\u03d1\u0006\\\f\u0000\u03d1\u00c8\u0001\u0000\u0000\u0000"+ - "\u03d2\u03d3\u0003e+\u0000\u03d3\u03d4\u0001\u0000\u0000\u0000\u03d4\u03d5"+ - "\u0006]\u0012\u0000\u03d5\u00ca\u0001\u0000\u0000\u0000\u03d6\u03d7\u0003"+ - "a)\u0000\u03d7\u03d8\u0001\u0000\u0000\u0000\u03d8\u03d9\u0006^\u0013"+ - "\u0000\u03d9\u00cc\u0001\u0000\u0000\u0000\u03da\u03db\u0007\u0010\u0000"+ - "\u0000\u03db\u03dc\u0007\u0003\u0000\u0000\u03dc\u03dd\u0007\u0005\u0000"+ - "\u0000\u03dd\u03de\u0007\f\u0000\u0000\u03de\u03df\u0007\u0000\u0000\u0000"+ - "\u03df\u03e0\u0007\f\u0000\u0000\u03e0\u03e1\u0007\u0005\u0000\u0000\u03e1"+ - "\u03e2\u0007\f\u0000\u0000\u03e2\u00ce\u0001\u0000\u0000\u0000\u03e3\u03e7"+ - "\b \u0000\u0000\u03e4\u03e5\u0005/\u0000\u0000\u03e5\u03e7\b!\u0000\u0000"+ - "\u03e6\u03e3\u0001\u0000\u0000\u0000\u03e6\u03e4\u0001\u0000\u0000\u0000"+ - "\u03e7\u00d0\u0001\u0000\u0000\u0000\u03e8\u03ea\u0003\u00cf`\u0000\u03e9"+ - "\u03e8\u0001\u0000\u0000\u0000\u03ea\u03eb\u0001\u0000\u0000\u0000\u03eb"+ - "\u03e9\u0001\u0000\u0000\u0000\u03eb\u03ec\u0001\u0000\u0000\u0000\u03ec"+ - "\u00d2\u0001\u0000\u0000\u0000\u03ed\u03ee\u0003\u00d1a\u0000\u03ee\u03ef"+ - "\u0001\u0000\u0000\u0000\u03ef\u03f0\u0006b\u0014\u0000\u03f0\u00d4\u0001"+ - "\u0000\u0000\u0000\u03f1\u03f2\u0003U#\u0000\u03f2\u03f3\u0001\u0000\u0000"+ - "\u0000\u03f3\u03f4\u0006c\u0015\u0000\u03f4\u00d6\u0001\u0000\u0000\u0000"+ - "\u03f5\u03f6\u00037\u0014\u0000\u03f6\u03f7\u0001\u0000\u0000\u0000\u03f7"+ - "\u03f8\u0006d\n\u0000\u03f8\u00d8\u0001\u0000\u0000\u0000\u03f9\u03fa"+ - "\u00039\u0015\u0000\u03fa\u03fb\u0001\u0000\u0000\u0000\u03fb\u03fc\u0006"+ - "e\n\u0000\u03fc\u00da\u0001\u0000\u0000\u0000\u03fd\u03fe\u0003;\u0016"+ - "\u0000\u03fe\u03ff\u0001\u0000\u0000\u0000\u03ff\u0400\u0006f\n\u0000"+ - "\u0400\u00dc\u0001\u0000\u0000\u0000\u0401\u0402\u0003?\u0018\u0000\u0402"+ - "\u0403\u0001\u0000\u0000\u0000\u0403\u0404\u0006g\u0010\u0000\u0404\u0405"+ - "\u0006g\u000b\u0000\u0405\u00de\u0001\u0000\u0000\u0000\u0406\u0407\u0003"+ - "i-\u0000\u0407\u0408\u0001\u0000\u0000\u0000\u0408\u0409\u0006h\u0016"+ - 
"\u0000\u0409\u00e0\u0001\u0000\u0000\u0000\u040a\u040b\u0003e+\u0000\u040b"+ - "\u040c\u0001\u0000\u0000\u0000\u040c\u040d\u0006i\u0012\u0000\u040d\u00e2"+ - "\u0001\u0000\u0000\u0000\u040e\u040f\u0004j\u0004\u0000\u040f\u0410\u0003"+ - "\u00819\u0000\u0410\u0411\u0001\u0000\u0000\u0000\u0411\u0412\u0006j\u0017"+ - "\u0000\u0412\u00e4\u0001\u0000\u0000\u0000\u0413\u0414\u0004k\u0005\u0000"+ - "\u0414\u0415\u0003\u00a5K\u0000\u0415\u0416\u0001\u0000\u0000\u0000\u0416"+ - "\u0417\u0006k\u0018\u0000\u0417\u00e6\u0001\u0000\u0000\u0000\u0418\u041d"+ - "\u0003C\u001a\u0000\u0419\u041d\u0003A\u0019\u0000\u041a\u041d\u0003Q"+ - "!\u0000\u041b\u041d\u0003\u009bF\u0000\u041c\u0418\u0001\u0000\u0000\u0000"+ - "\u041c\u0419\u0001\u0000\u0000\u0000\u041c\u041a\u0001\u0000\u0000\u0000"+ - "\u041c\u041b\u0001\u0000\u0000\u0000\u041d\u00e8\u0001\u0000\u0000\u0000"+ - "\u041e\u0421\u0003C\u001a\u0000\u041f\u0421\u0003\u009bF\u0000\u0420\u041e"+ - "\u0001\u0000\u0000\u0000\u0420\u041f\u0001\u0000\u0000\u0000\u0421\u0425"+ - "\u0001\u0000\u0000\u0000\u0422\u0424\u0003\u00e7l\u0000\u0423\u0422\u0001"+ - "\u0000\u0000\u0000\u0424\u0427\u0001\u0000\u0000\u0000\u0425\u0423\u0001"+ - "\u0000\u0000\u0000\u0425\u0426\u0001\u0000\u0000\u0000\u0426\u0432\u0001"+ - "\u0000\u0000\u0000\u0427\u0425\u0001\u0000\u0000\u0000\u0428\u042b\u0003"+ - "Q!\u0000\u0429\u042b\u0003K\u001e\u0000\u042a\u0428\u0001\u0000\u0000"+ - "\u0000\u042a\u0429\u0001\u0000\u0000\u0000\u042b\u042d\u0001\u0000\u0000"+ - "\u0000\u042c\u042e\u0003\u00e7l\u0000\u042d\u042c\u0001\u0000\u0000\u0000"+ - "\u042e\u042f\u0001\u0000\u0000\u0000\u042f\u042d\u0001\u0000\u0000\u0000"+ - "\u042f\u0430\u0001\u0000\u0000\u0000\u0430\u0432\u0001\u0000\u0000\u0000"+ - "\u0431\u0420\u0001\u0000\u0000\u0000\u0431\u042a\u0001\u0000\u0000\u0000"+ - "\u0432\u00ea\u0001\u0000\u0000\u0000\u0433\u0436\u0003\u00e9m\u0000\u0434"+ - "\u0436\u0003\u00adO\u0000\u0435\u0433\u0001\u0000\u0000\u0000\u0435\u0434"+ - "\u0001\u0000\u0000\u0000\u0436\u0437\u0001\u0000\u0000\u0000\u0437\u0435"+ - "\u0001\u0000\u0000\u0000\u0437\u0438\u0001\u0000\u0000\u0000\u0438\u00ec"+ - "\u0001\u0000\u0000\u0000\u0439\u043a\u00037\u0014\u0000\u043a\u043b\u0001"+ - "\u0000\u0000\u0000\u043b\u043c\u0006o\n\u0000\u043c\u00ee\u0001\u0000"+ - "\u0000\u0000\u043d\u043e\u00039\u0015\u0000\u043e\u043f\u0001\u0000\u0000"+ - "\u0000\u043f\u0440\u0006p\n\u0000\u0440\u00f0\u0001\u0000\u0000\u0000"+ - "\u0441\u0442\u0003;\u0016\u0000\u0442\u0443\u0001\u0000\u0000\u0000\u0443"+ - "\u0444\u0006q\n\u0000\u0444\u00f2\u0001\u0000\u0000\u0000\u0445\u0446"+ - "\u0003?\u0018\u0000\u0446\u0447\u0001\u0000\u0000\u0000\u0447\u0448\u0006"+ - "r\u0010\u0000\u0448\u0449\u0006r\u000b\u0000\u0449\u00f4\u0001\u0000\u0000"+ - "\u0000\u044a\u044b\u0003a)\u0000\u044b\u044c\u0001\u0000\u0000\u0000\u044c"+ - "\u044d\u0006s\u0013\u0000\u044d\u00f6\u0001\u0000\u0000\u0000\u044e\u044f"+ - "\u0003e+\u0000\u044f\u0450\u0001\u0000\u0000\u0000\u0450\u0451\u0006t"+ - "\u0012\u0000\u0451\u00f8\u0001\u0000\u0000\u0000\u0452\u0453\u0003i-\u0000"+ - "\u0453\u0454\u0001\u0000\u0000\u0000\u0454\u0455\u0006u\u0016\u0000\u0455"+ - "\u00fa\u0001\u0000\u0000\u0000\u0456\u0457\u0004v\u0006\u0000\u0457\u0458"+ - "\u0003\u00819\u0000\u0458\u0459\u0001\u0000\u0000\u0000\u0459\u045a\u0006"+ - "v\u0017\u0000\u045a\u00fc\u0001\u0000\u0000\u0000\u045b\u045c\u0004w\u0007"+ - "\u0000\u045c\u045d\u0003\u00a5K\u0000\u045d\u045e\u0001\u0000\u0000\u0000"+ - "\u045e\u045f\u0006w\u0018\u0000\u045f\u00fe\u0001\u0000\u0000\u0000\u0460"+ - 
"\u0461\u0007\f\u0000\u0000\u0461\u0462\u0007\u0002\u0000\u0000\u0462\u0100"+ - "\u0001\u0000\u0000\u0000\u0463\u0464\u0003\u00ebn\u0000\u0464\u0465\u0001"+ - "\u0000\u0000\u0000\u0465\u0466\u0006y\u0019\u0000\u0466\u0102\u0001\u0000"+ - "\u0000\u0000\u0467\u0468\u00037\u0014\u0000\u0468\u0469\u0001\u0000\u0000"+ - "\u0000\u0469\u046a\u0006z\n\u0000\u046a\u0104\u0001\u0000\u0000\u0000"+ - "\u046b\u046c\u00039\u0015\u0000\u046c\u046d\u0001\u0000\u0000\u0000\u046d"+ - "\u046e\u0006{\n\u0000\u046e\u0106\u0001\u0000\u0000\u0000\u046f\u0470"+ - "\u0003;\u0016\u0000\u0470\u0471\u0001\u0000\u0000\u0000\u0471\u0472\u0006"+ - "|\n\u0000\u0472\u0108\u0001\u0000\u0000\u0000\u0473\u0474\u0003?\u0018"+ - "\u0000\u0474\u0475\u0001\u0000\u0000\u0000\u0475\u0476\u0006}\u0010\u0000"+ - "\u0476\u0477\u0006}\u000b\u0000\u0477\u010a\u0001\u0000\u0000\u0000\u0478"+ - "\u0479\u0003\u00a7L\u0000\u0479\u047a\u0001\u0000\u0000\u0000\u047a\u047b"+ - "\u0006~\u000e\u0000\u047b\u047c\u0006~\u001a\u0000\u047c\u010c\u0001\u0000"+ - "\u0000\u0000\u047d\u047e\u0007\u0007\u0000\u0000\u047e\u047f\u0007\t\u0000"+ - "\u0000\u047f\u0480\u0001\u0000\u0000\u0000\u0480\u0481\u0006\u007f\u001b"+ - "\u0000\u0481\u010e\u0001\u0000\u0000\u0000\u0482\u0483\u0007\u0013\u0000"+ - "\u0000\u0483\u0484\u0007\u0001\u0000\u0000\u0484\u0485\u0007\u0005\u0000"+ - "\u0000\u0485\u0486\u0007\n\u0000\u0000\u0486\u0487\u0001\u0000\u0000\u0000"+ - "\u0487\u0488\u0006\u0080\u001b\u0000\u0488\u0110\u0001\u0000\u0000\u0000"+ - "\u0489\u048a\b\"\u0000\u0000\u048a\u0112\u0001\u0000\u0000\u0000\u048b"+ - "\u048d\u0003\u0111\u0081\u0000\u048c\u048b\u0001\u0000\u0000\u0000\u048d"+ - "\u048e\u0001\u0000\u0000\u0000\u048e\u048c\u0001\u0000\u0000\u0000\u048e"+ - "\u048f\u0001\u0000\u0000\u0000\u048f\u0490\u0001\u0000\u0000\u0000\u0490"+ - "\u0491\u0003=\u0017\u0000\u0491\u0493\u0001\u0000\u0000\u0000\u0492\u048c"+ - "\u0001\u0000\u0000\u0000\u0492\u0493\u0001\u0000\u0000\u0000\u0493\u0495"+ - "\u0001\u0000\u0000\u0000\u0494\u0496\u0003\u0111\u0081\u0000\u0495\u0494"+ - "\u0001\u0000\u0000\u0000\u0496\u0497\u0001\u0000\u0000\u0000\u0497\u0495"+ - "\u0001\u0000\u0000\u0000\u0497\u0498\u0001\u0000\u0000\u0000\u0498\u0114"+ - "\u0001\u0000\u0000\u0000\u0499\u049a\u0003\u0113\u0082\u0000\u049a\u049b"+ - "\u0001\u0000\u0000\u0000\u049b\u049c\u0006\u0083\u001c\u0000\u049c\u0116"+ - "\u0001\u0000\u0000\u0000\u049d\u049e\u00037\u0014\u0000\u049e\u049f\u0001"+ - "\u0000\u0000\u0000\u049f\u04a0\u0006\u0084\n\u0000\u04a0\u0118\u0001\u0000"+ - "\u0000\u0000\u04a1\u04a2\u00039\u0015\u0000\u04a2\u04a3\u0001\u0000\u0000"+ - "\u0000\u04a3\u04a4\u0006\u0085\n\u0000\u04a4\u011a\u0001\u0000\u0000\u0000"+ - "\u04a5\u04a6\u0003;\u0016\u0000\u04a6\u04a7\u0001\u0000\u0000\u0000\u04a7"+ - "\u04a8\u0006\u0086\n\u0000\u04a8\u011c\u0001\u0000\u0000\u0000\u04a9\u04aa"+ - "\u0003?\u0018\u0000\u04aa\u04ab\u0001\u0000\u0000\u0000\u04ab\u04ac\u0006"+ - "\u0087\u0010\u0000\u04ac\u04ad\u0006\u0087\u000b\u0000\u04ad\u04ae\u0006"+ - "\u0087\u000b\u0000\u04ae\u011e\u0001\u0000\u0000\u0000\u04af\u04b0\u0003"+ - "a)\u0000\u04b0\u04b1\u0001\u0000\u0000\u0000\u04b1\u04b2\u0006\u0088\u0013"+ - "\u0000\u04b2\u0120\u0001\u0000\u0000\u0000\u04b3\u04b4\u0003e+\u0000\u04b4"+ - "\u04b5\u0001\u0000\u0000\u0000\u04b5\u04b6\u0006\u0089\u0012\u0000\u04b6"+ - "\u0122\u0001\u0000\u0000\u0000\u04b7\u04b8\u0003i-\u0000\u04b8\u04b9\u0001"+ - "\u0000\u0000\u0000\u04b9\u04ba\u0006\u008a\u0016\u0000\u04ba\u0124\u0001"+ - "\u0000\u0000\u0000\u04bb\u04bc\u0003\u010f\u0080\u0000\u04bc\u04bd\u0001"+ - 
"\u0000\u0000\u0000\u04bd\u04be\u0006\u008b\u001d\u0000\u04be\u0126\u0001"+ - "\u0000\u0000\u0000\u04bf\u04c0\u0003\u00ebn\u0000\u04c0\u04c1\u0001\u0000"+ - "\u0000\u0000\u04c1\u04c2\u0006\u008c\u0019\u0000\u04c2\u0128\u0001\u0000"+ - "\u0000\u0000\u04c3\u04c4\u0003\u00afP\u0000\u04c4\u04c5\u0001\u0000\u0000"+ - "\u0000\u04c5\u04c6\u0006\u008d\u001e\u0000\u04c6\u012a\u0001\u0000\u0000"+ - "\u0000\u04c7\u04c8\u0004\u008e\b\u0000\u04c8\u04c9\u0003\u00819\u0000"+ - "\u04c9\u04ca\u0001\u0000\u0000\u0000\u04ca\u04cb\u0006\u008e\u0017\u0000"+ - "\u04cb\u012c\u0001\u0000\u0000\u0000\u04cc\u04cd\u0004\u008f\t\u0000\u04cd"+ - "\u04ce\u0003\u00a5K\u0000\u04ce\u04cf\u0001\u0000\u0000\u0000\u04cf\u04d0"+ - "\u0006\u008f\u0018\u0000\u04d0\u012e\u0001\u0000\u0000\u0000\u04d1\u04d2"+ - "\u00037\u0014\u0000\u04d2\u04d3\u0001\u0000\u0000\u0000\u04d3\u04d4\u0006"+ - "\u0090\n\u0000\u04d4\u0130\u0001\u0000\u0000\u0000\u04d5\u04d6\u00039"+ - "\u0015\u0000\u04d6\u04d7\u0001\u0000\u0000\u0000\u04d7\u04d8\u0006\u0091"+ - "\n\u0000\u04d8\u0132\u0001\u0000\u0000\u0000\u04d9\u04da\u0003;\u0016"+ - "\u0000\u04da\u04db\u0001\u0000\u0000\u0000\u04db\u04dc\u0006\u0092\n\u0000"+ - "\u04dc\u0134\u0001\u0000\u0000\u0000\u04dd\u04de\u0003?\u0018\u0000\u04de"+ - "\u04df\u0001\u0000\u0000\u0000\u04df\u04e0\u0006\u0093\u0010\u0000\u04e0"+ - "\u04e1\u0006\u0093\u000b\u0000\u04e1\u0136\u0001\u0000\u0000\u0000\u04e2"+ - "\u04e3\u0003i-\u0000\u04e3\u04e4\u0001\u0000\u0000\u0000\u04e4\u04e5\u0006"+ - "\u0094\u0016\u0000\u04e5\u0138\u0001\u0000\u0000\u0000\u04e6\u04e7\u0004"+ - "\u0095\n\u0000\u04e7\u04e8\u0003\u00819\u0000\u04e8\u04e9\u0001\u0000"+ - "\u0000\u0000\u04e9\u04ea\u0006\u0095\u0017\u0000\u04ea\u013a\u0001\u0000"+ - "\u0000\u0000\u04eb\u04ec\u0004\u0096\u000b\u0000\u04ec\u04ed\u0003\u00a5"+ - "K\u0000\u04ed\u04ee\u0001\u0000\u0000\u0000\u04ee\u04ef\u0006\u0096\u0018"+ - "\u0000\u04ef\u013c\u0001\u0000\u0000\u0000\u04f0\u04f1\u0003\u00afP\u0000"+ - "\u04f1\u04f2\u0001\u0000\u0000\u0000\u04f2\u04f3\u0006\u0097\u001e\u0000"+ - "\u04f3\u013e\u0001\u0000\u0000\u0000\u04f4\u04f5\u0003\u00abN\u0000\u04f5"+ - "\u04f6\u0001\u0000\u0000\u0000\u04f6\u04f7\u0006\u0098\u001f\u0000\u04f7"+ - "\u0140\u0001\u0000\u0000\u0000\u04f8\u04f9\u00037\u0014\u0000\u04f9\u04fa"+ - "\u0001\u0000\u0000\u0000\u04fa\u04fb\u0006\u0099\n\u0000\u04fb\u0142\u0001"+ - "\u0000\u0000\u0000\u04fc\u04fd\u00039\u0015\u0000\u04fd\u04fe\u0001\u0000"+ - "\u0000\u0000\u04fe\u04ff\u0006\u009a\n\u0000\u04ff\u0144\u0001\u0000\u0000"+ - "\u0000\u0500\u0501\u0003;\u0016\u0000\u0501\u0502\u0001\u0000\u0000\u0000"+ - "\u0502\u0503\u0006\u009b\n\u0000\u0503\u0146\u0001\u0000\u0000\u0000\u0504"+ - "\u0505\u0003?\u0018\u0000\u0505\u0506\u0001\u0000\u0000\u0000\u0506\u0507"+ - "\u0006\u009c\u0010\u0000\u0507\u0508\u0006\u009c\u000b\u0000\u0508\u0148"+ - "\u0001\u0000\u0000\u0000\u0509\u050a\u0007\u0001\u0000\u0000\u050a\u050b"+ - "\u0007\t\u0000\u0000\u050b\u050c\u0007\u000f\u0000\u0000\u050c\u050d\u0007"+ - "\u0007\u0000\u0000\u050d\u014a\u0001\u0000\u0000\u0000\u050e\u050f\u0003"+ - "7\u0014\u0000\u050f\u0510\u0001\u0000\u0000\u0000\u0510\u0511\u0006\u009e"+ - "\n\u0000\u0511\u014c\u0001\u0000\u0000\u0000\u0512\u0513\u00039\u0015"+ - "\u0000\u0513\u0514\u0001\u0000\u0000\u0000\u0514\u0515\u0006\u009f\n\u0000"+ - "\u0515\u014e\u0001\u0000\u0000\u0000\u0516\u0517\u0003;\u0016\u0000\u0517"+ - "\u0518\u0001\u0000\u0000\u0000\u0518\u0519\u0006\u00a0\n\u0000\u0519\u0150"+ - "\u0001\u0000\u0000\u0000\u051a\u051b\u0003\u00a9M\u0000\u051b\u051c\u0001"+ - 
"\u0000\u0000\u0000\u051c\u051d\u0006\u00a1\u0011\u0000\u051d\u051e\u0006"+ - "\u00a1\u000b\u0000\u051e\u0152\u0001\u0000\u0000\u0000\u051f\u0520\u0003"+ - "=\u0017\u0000\u0520\u0521\u0001\u0000\u0000\u0000\u0521\u0522\u0006\u00a2"+ - "\f\u0000\u0522\u0154\u0001\u0000\u0000\u0000\u0523\u0529\u0003K\u001e"+ - "\u0000\u0524\u0529\u0003A\u0019\u0000\u0525\u0529\u0003i-\u0000\u0526"+ - "\u0529\u0003C\u001a\u0000\u0527\u0529\u0003Q!\u0000\u0528\u0523\u0001"+ - "\u0000\u0000\u0000\u0528\u0524\u0001\u0000\u0000\u0000\u0528\u0525\u0001"+ - "\u0000\u0000\u0000\u0528\u0526\u0001\u0000\u0000\u0000\u0528\u0527\u0001"+ - "\u0000\u0000\u0000\u0529\u052a\u0001\u0000\u0000\u0000\u052a\u0528\u0001"+ - "\u0000\u0000\u0000\u052a\u052b\u0001\u0000\u0000\u0000\u052b\u0156\u0001"+ - "\u0000\u0000\u0000\u052c\u052d\u00037\u0014\u0000\u052d\u052e\u0001\u0000"+ - "\u0000\u0000\u052e\u052f\u0006\u00a4\n\u0000\u052f\u0158\u0001\u0000\u0000"+ - "\u0000\u0530\u0531\u00039\u0015\u0000\u0531\u0532\u0001\u0000\u0000\u0000"+ - "\u0532\u0533\u0006\u00a5\n\u0000\u0533\u015a\u0001\u0000\u0000\u0000\u0534"+ - "\u0535\u0003;\u0016\u0000\u0535\u0536\u0001\u0000\u0000\u0000\u0536\u0537"+ - "\u0006\u00a6\n\u0000\u0537\u015c\u0001\u0000\u0000\u0000\u0538\u0539\u0003"+ - "?\u0018\u0000\u0539\u053a\u0001\u0000\u0000\u0000\u053a\u053b\u0006\u00a7"+ - "\u0010\u0000\u053b\u053c\u0006\u00a7\u000b\u0000\u053c\u015e\u0001\u0000"+ - "\u0000\u0000\u053d\u053e\u0003=\u0017\u0000\u053e\u053f\u0001\u0000\u0000"+ - "\u0000\u053f\u0540\u0006\u00a8\f\u0000\u0540\u0160\u0001\u0000\u0000\u0000"+ - "\u0541\u0542\u0003e+\u0000\u0542\u0543\u0001\u0000\u0000\u0000\u0543\u0544"+ - "\u0006\u00a9\u0012\u0000\u0544\u0162\u0001\u0000\u0000\u0000\u0545\u0546"+ - "\u0003i-\u0000\u0546\u0547\u0001\u0000\u0000\u0000\u0547\u0548\u0006\u00aa"+ - "\u0016\u0000\u0548\u0164\u0001\u0000\u0000\u0000\u0549\u054a\u0003\u010d"+ - "\u007f\u0000\u054a\u054b\u0001\u0000\u0000\u0000\u054b\u054c\u0006\u00ab"+ - " \u0000\u054c\u054d\u0006\u00ab!\u0000\u054d\u0166\u0001\u0000\u0000\u0000"+ - "\u054e\u054f\u0003\u00d1a\u0000\u054f\u0550\u0001\u0000\u0000\u0000\u0550"+ - "\u0551\u0006\u00ac\u0014\u0000\u0551\u0168\u0001\u0000\u0000\u0000\u0552"+ - "\u0553\u0003U#\u0000\u0553\u0554\u0001\u0000\u0000\u0000\u0554\u0555\u0006"+ - "\u00ad\u0015\u0000\u0555\u016a\u0001\u0000\u0000\u0000\u0556\u0557\u0003"+ - "7\u0014\u0000\u0557\u0558\u0001\u0000\u0000\u0000\u0558\u0559\u0006\u00ae"+ - "\n\u0000\u0559\u016c\u0001\u0000\u0000\u0000\u055a\u055b\u00039\u0015"+ - "\u0000\u055b\u055c\u0001\u0000\u0000\u0000\u055c\u055d\u0006\u00af\n\u0000"+ - "\u055d\u016e\u0001\u0000\u0000\u0000\u055e\u055f\u0003;\u0016\u0000\u055f"+ - "\u0560\u0001\u0000\u0000\u0000\u0560\u0561\u0006\u00b0\n\u0000\u0561\u0170"+ - "\u0001\u0000\u0000\u0000\u0562\u0563\u0003?\u0018\u0000\u0563\u0564\u0001"+ - "\u0000\u0000\u0000\u0564\u0565\u0006\u00b1\u0010\u0000\u0565\u0566\u0006"+ - "\u00b1\u000b\u0000\u0566\u0567\u0006\u00b1\u000b\u0000\u0567\u0172\u0001"+ - "\u0000\u0000\u0000\u0568\u0569\u0003e+\u0000\u0569\u056a\u0001\u0000\u0000"+ - "\u0000\u056a\u056b\u0006\u00b2\u0012\u0000\u056b\u0174\u0001\u0000\u0000"+ - "\u0000\u056c\u056d\u0003i-\u0000\u056d\u056e\u0001\u0000\u0000\u0000\u056e"+ - "\u056f\u0006\u00b3\u0016\u0000\u056f\u0176\u0001\u0000\u0000\u0000\u0570"+ - "\u0571\u0003\u00ebn\u0000\u0571\u0572\u0001\u0000\u0000\u0000\u0572\u0573"+ - "\u0006\u00b4\u0019\u0000\u0573\u0178\u0001\u0000\u0000\u0000\u0574\u0575"+ - "\u00037\u0014\u0000\u0575\u0576\u0001\u0000\u0000\u0000\u0576\u0577\u0006"+ - 
"\u00b5\n\u0000\u0577\u017a\u0001\u0000\u0000\u0000\u0578\u0579\u00039"+ - "\u0015\u0000\u0579\u057a\u0001\u0000\u0000\u0000\u057a\u057b\u0006\u00b6"+ - "\n\u0000\u057b\u017c\u0001\u0000\u0000\u0000\u057c\u057d\u0003;\u0016"+ - "\u0000\u057d\u057e\u0001\u0000\u0000\u0000\u057e\u057f\u0006\u00b7\n\u0000"+ - "\u057f\u017e\u0001\u0000\u0000\u0000\u0580\u0581\u0003?\u0018\u0000\u0581"+ - "\u0582\u0001\u0000\u0000\u0000\u0582\u0583\u0006\u00b8\u0010\u0000\u0583"+ - "\u0584\u0006\u00b8\u000b\u0000\u0584\u0180\u0001\u0000\u0000\u0000\u0585"+ - "\u0586\u0003\u00d1a\u0000\u0586\u0587\u0001\u0000\u0000\u0000\u0587\u0588"+ - "\u0006\u00b9\u0014\u0000\u0588\u0589\u0006\u00b9\u000b\u0000\u0589\u058a"+ - "\u0006\u00b9\"\u0000\u058a\u0182\u0001\u0000\u0000\u0000\u058b\u058c\u0003"+ - "U#\u0000\u058c\u058d\u0001\u0000\u0000\u0000\u058d\u058e\u0006\u00ba\u0015"+ - "\u0000\u058e\u058f\u0006\u00ba\u000b\u0000\u058f\u0590\u0006\u00ba\"\u0000"+ - "\u0590\u0184\u0001\u0000\u0000\u0000\u0591\u0592\u00037\u0014\u0000\u0592"+ - "\u0593\u0001\u0000\u0000\u0000\u0593\u0594\u0006\u00bb\n\u0000\u0594\u0186"+ - "\u0001\u0000\u0000\u0000\u0595\u0596\u00039\u0015\u0000\u0596\u0597\u0001"+ - "\u0000\u0000\u0000\u0597\u0598\u0006\u00bc\n\u0000\u0598\u0188\u0001\u0000"+ - "\u0000\u0000\u0599\u059a\u0003;\u0016\u0000\u059a\u059b\u0001\u0000\u0000"+ - "\u0000\u059b\u059c\u0006\u00bd\n\u0000\u059c\u018a\u0001\u0000\u0000\u0000"+ - "\u059d\u059e\u0003=\u0017\u0000\u059e\u059f\u0001\u0000\u0000\u0000\u059f"+ - "\u05a0\u0006\u00be\f\u0000\u05a0\u05a1\u0006\u00be\u000b\u0000\u05a1\u05a2"+ - "\u0006\u00be\t\u0000\u05a2\u018c\u0001\u0000\u0000\u0000\u05a3\u05a4\u0003"+ - "e+\u0000\u05a4\u05a5\u0001\u0000\u0000\u0000\u05a5\u05a6\u0006\u00bf\u0012"+ - "\u0000\u05a6\u05a7\u0006\u00bf\u000b\u0000\u05a7\u05a8\u0006\u00bf\t\u0000"+ - "\u05a8\u018e\u0001\u0000\u0000\u0000\u05a9\u05aa\u00037\u0014\u0000\u05aa"+ - "\u05ab\u0001\u0000\u0000\u0000\u05ab\u05ac\u0006\u00c0\n\u0000\u05ac\u0190"+ - "\u0001\u0000\u0000\u0000\u05ad\u05ae\u00039\u0015\u0000\u05ae\u05af\u0001"+ - "\u0000\u0000\u0000\u05af\u05b0\u0006\u00c1\n\u0000\u05b0\u0192\u0001\u0000"+ - "\u0000\u0000\u05b1\u05b2\u0003;\u0016\u0000\u05b2\u05b3\u0001\u0000\u0000"+ - "\u0000\u05b3\u05b4\u0006\u00c2\n\u0000\u05b4\u0194\u0001\u0000\u0000\u0000"+ - "\u05b5\u05b6\u0003\u00afP\u0000\u05b6\u05b7\u0001\u0000\u0000\u0000\u05b7"+ - "\u05b8\u0006\u00c3\u000b\u0000\u05b8\u05b9\u0006\u00c3\u0000\u0000\u05b9"+ - "\u05ba\u0006\u00c3\u001e\u0000\u05ba\u0196\u0001\u0000\u0000\u0000\u05bb"+ - "\u05bc\u0003\u00abN\u0000\u05bc\u05bd\u0001\u0000\u0000\u0000\u05bd\u05be"+ - "\u0006\u00c4\u000b\u0000\u05be\u05bf\u0006\u00c4\u0000\u0000\u05bf\u05c0"+ - "\u0006\u00c4\u001f\u0000\u05c0\u0198\u0001\u0000\u0000\u0000\u05c1\u05c2"+ - "\u0003[&\u0000\u05c2\u05c3\u0001\u0000\u0000\u0000\u05c3\u05c4\u0006\u00c5"+ - "\u000b\u0000\u05c4\u05c5\u0006\u00c5\u0000\u0000\u05c5\u05c6\u0006\u00c5"+ - "#\u0000\u05c6\u019a\u0001\u0000\u0000\u0000\u05c7\u05c8\u0003?\u0018\u0000"+ - "\u05c8\u05c9\u0001\u0000\u0000\u0000\u05c9\u05ca\u0006\u00c6\u0010\u0000"+ - "\u05ca\u05cb\u0006\u00c6\u000b\u0000\u05cb\u019c\u0001\u0000\u0000\u0000"+ - "A\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007\b\t\n\u000b\f\r\u000e"+ - "\u0245\u024f\u0253\u0256\u025f\u0261\u026c\u0281\u0286\u028f\u0296\u029b"+ - "\u029d\u02a8\u02b0\u02b3\u02b5\u02ba\u02bf\u02c5\u02cc\u02d1\u02d7\u02da"+ - "\u02e2\u02e6\u0369\u036e\u0375\u0377\u0387\u038c\u0391\u0393\u0399\u03e6"+ - "\u03eb\u041c\u0420\u0425\u042a\u042f\u0431\u0435\u0437\u048e\u0492\u0497"+ - 
"\u0528\u052a$\u0005\u0001\u0000\u0005\u0004\u0000\u0005\u0006\u0000\u0005"+ - "\u0002\u0000\u0005\u0003\u0000\u0005\b\u0000\u0005\u0005\u0000\u0005\t"+ - "\u0000\u0005\u000b\u0000\u0005\r\u0000\u0000\u0001\u0000\u0004\u0000\u0000"+ - "\u0007\u0018\u0000\u0007\u0010\u0000\u0007A\u0000\u0005\u0000\u0000\u0007"+ - "\u0019\u0000\u0007B\u0000\u0007\"\u0000\u0007 \u0000\u0007L\u0000\u0007"+ - "\u001a\u0000\u0007$\u0000\u00070\u0000\u0007@\u0000\u0007P\u0000\u0005"+ - "\n\u0000\u0005\u0007\u0000\u0007Z\u0000\u0007Y\u0000\u0007D\u0000\u0007"+ - "C\u0000\u0007X\u0000\u0005\f\u0000\u0005\u000e\u0000\u0007\u001d\u0000"; + "\u00a7\u0004\u00a7\u056e\b\u00a7\u000b\u00a7\f\u00a7\u056f\u0001\u00a8"+ + "\u0001\u00a8\u0001\u00a8\u0001\u00a8\u0001\u00a9\u0001\u00a9\u0001\u00a9"+ + "\u0001\u00a9\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00aa\u0001\u00ab"+ + "\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001\u00ab\u0001\u00ac\u0001\u00ac"+ + "\u0001\u00ac\u0001\u00ac\u0001\u00ad\u0001\u00ad\u0001\u00ad\u0001\u00ad"+ + "\u0001\u00ae\u0001\u00ae\u0001\u00ae\u0001\u00ae\u0001\u00af\u0001\u00af"+ + "\u0001\u00af\u0001\u00af\u0001\u00af\u0001\u00b0\u0001\u00b0\u0001\u00b0"+ + "\u0001\u00b0\u0001\u00b1\u0001\u00b1\u0001\u00b1\u0001\u00b1\u0001\u00b2"+ + "\u0001\u00b2\u0001\u00b2\u0001\u00b2\u0001\u00b3\u0001\u00b3\u0001\u00b3"+ + "\u0001\u00b3\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0001\u00b4\u0001\u00b5"+ + "\u0001\u00b5\u0001\u00b5\u0001\u00b5\u0001\u00b5\u0001\u00b5\u0001\u00b6"+ + "\u0001\u00b6\u0001\u00b6\u0001\u00b6\u0001\u00b7\u0001\u00b7\u0001\u00b7"+ + "\u0001\u00b7\u0001\u00b8\u0001\u00b8\u0001\u00b8\u0001\u00b8\u0001\u00b9"+ + "\u0001\u00b9\u0001\u00b9\u0001\u00b9\u0001\u00ba\u0001\u00ba\u0001\u00ba"+ + "\u0001\u00ba\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bb\u0001\u00bc"+ + "\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bc\u0001\u00bd\u0001\u00bd"+ + "\u0001\u00bd\u0001\u00bd\u0001\u00be\u0001\u00be\u0001\u00be\u0001\u00be"+ + "\u0001\u00bf\u0001\u00bf\u0001\u00bf\u0001\u00bf\u0001\u00bf\u0001\u00bf"+ + "\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c0"+ + "\u0001\u00c0\u0001\u00c0\u0001\u00c0\u0001\u00c1\u0001\u00c1\u0001\u00c1"+ + "\u0001\u00c1\u0001\u00c2\u0001\u00c2\u0001\u00c2\u0001\u00c2\u0001\u00c3"+ + "\u0001\u00c3\u0001\u00c3\u0001\u00c3\u0001\u00c4\u0001\u00c4\u0001\u00c4"+ + "\u0001\u00c4\u0001\u00c5\u0001\u00c5\u0001\u00c5\u0001\u00c5\u0001\u00c6"+ + "\u0001\u00c6\u0001\u00c6\u0001\u00c6\u0001\u00c6\u0001\u00c7\u0001\u00c7"+ + "\u0001\u00c7\u0001\u00c7\u0001\u00c7\u0001\u00c7\u0001\u00c8\u0001\u00c8"+ + "\u0001\u00c8\u0001\u00c8\u0001\u00c8\u0001\u00c8\u0001\u00c9\u0001\u00c9"+ + "\u0001\u00c9\u0001\u00c9\u0001\u00ca\u0001\u00ca\u0001\u00ca\u0001\u00ca"+ + "\u0001\u00cb\u0001\u00cb\u0001\u00cb\u0001\u00cb\u0001\u00cc\u0001\u00cc"+ + "\u0001\u00cc\u0001\u00cc\u0001\u00cc\u0001\u00cc\u0001\u00cd\u0001\u00cd"+ + "\u0001\u00cd\u0001\u00cd\u0001\u00cd\u0001\u00cd\u0001\u00ce\u0001\u00ce"+ + "\u0001\u00ce\u0001\u00ce\u0001\u00cf\u0001\u00cf\u0001\u00cf\u0001\u00cf"+ + "\u0001\u00d0\u0001\u00d0\u0001\u00d0\u0001\u00d0\u0001\u00d1\u0001\u00d1"+ + "\u0001\u00d1\u0001\u00d1\u0001\u00d1\u0001\u00d1\u0001\u00d2\u0001\u00d2"+ + "\u0001\u00d2\u0001\u00d2\u0001\u00d2\u0001\u00d2\u0001\u00d3\u0001\u00d3"+ + "\u0001\u00d3\u0001\u00d3\u0001\u00d3\u0001\u00d3\u0001\u00d4\u0001\u00d4"+ + "\u0001\u00d4\u0001\u00d4\u0001\u00d4\u0002\u02ab\u02f0\u0000\u00d5\u0010"+ + "\u0001\u0012\u0002\u0014\u0003\u0016\u0004\u0018\u0005\u001a\u0006\u001c"+ + "\u0007\u001e\b 
\t\"\n$\u000b&\f(\r*\u000e,\u000f.\u00100\u00112\u0012"+ + "4\u00136\u00148\u0015:\u0016<\u0017>\u0018@\u0019B\u001aD\u001bF\u001c"+ + "H\u001dJ\u0000L\u0000N\u0000P\u0000R\u0000T\u0000V\u0000X\u0000Z\u0000"+ + "\\\u0000^\u001e`\u001fb d!f\"h#j$l%n&p\'r(t)v*x+z,|-~.\u0080/\u00820\u0084"+ + "1\u00862\u00883\u008a4\u008c5\u008e6\u00907\u00928\u00949\u0096:\u0098"+ + ";\u009a<\u009c=\u009e>\u00a0?\u00a2@\u00a4A\u00a6B\u00a8C\u00aaD\u00ac"+ + "\u0000\u00aeE\u00b0F\u00b2G\u00b4H\u00b6\u0000\u00b8I\u00baJ\u00bcK\u00be"+ + "L\u00c0\u0000\u00c2\u0000\u00c4M\u00c6N\u00c8O\u00ca\u0000\u00cc\u0000"+ + "\u00ce\u0000\u00d0\u0000\u00d2\u0000\u00d4\u0000\u00d6P\u00d8\u0000\u00da"+ + "Q\u00dc\u0000\u00de\u0000\u00e0R\u00e2S\u00e4T\u00e6\u0000\u00e8\u0000"+ + "\u00ea\u0000\u00ec\u0000\u00ee\u0000\u00f0\u0000\u00f2\u0000\u00f4U\u00f6"+ + "V\u00f8W\u00faX\u00fc\u0000\u00fe\u0000\u0100\u0000\u0102\u0000\u0104"+ + "\u0000\u0106\u0000\u0108Y\u010a\u0000\u010cZ\u010e[\u0110\\\u0112\u0000"+ + "\u0114\u0000\u0116]\u0118^\u011a\u0000\u011c_\u011e\u0000\u0120`\u0122"+ + "a\u0124b\u0126\u0000\u0128\u0000\u012a\u0000\u012c\u0000\u012e\u0000\u0130"+ + "\u0000\u0132\u0000\u0134\u0000\u0136\u0000\u0138c\u013ad\u013ce\u013e"+ + "\u0000\u0140\u0000\u0142\u0000\u0144\u0000\u0146\u0000\u0148\u0000\u014a"+ + "f\u014cg\u014eh\u0150\u0000\u0152i\u0154j\u0156k\u0158l\u015a\u0000\u015c"+ + "\u0000\u015em\u0160n\u0162o\u0164p\u0166\u0000\u0168\u0000\u016a\u0000"+ + "\u016c\u0000\u016e\u0000\u0170\u0000\u0172\u0000\u0174q\u0176r\u0178s"+ + "\u017a\u0000\u017c\u0000\u017e\u0000\u0180\u0000\u0182t\u0184u\u0186v"+ + "\u0188\u0000\u018a\u0000\u018c\u0000\u018e\u0000\u0190w\u0192\u0000\u0194"+ + "\u0000\u0196x\u0198y\u019az\u019c\u0000\u019e\u0000\u01a0\u0000\u01a2"+ + "{\u01a4|\u01a6}\u01a8\u0000\u01aa\u0000\u01ac~\u01ae\u007f\u01b0\u0080"+ + "\u01b2\u0000\u01b4\u0000\u01b6\u0000\u01b8\u0000\u0010\u0000\u0001\u0002"+ + "\u0003\u0004\u0005\u0006\u0007\b\t\n\u000b\f\r\u000e\u000f$\u0002\u0000"+ + "DDdd\u0002\u0000IIii\u0002\u0000SSss\u0002\u0000EEee\u0002\u0000CCcc\u0002"+ + "\u0000TTtt\u0002\u0000RRrr\u0002\u0000OOoo\u0002\u0000PPpp\u0002\u0000"+ + "NNnn\u0002\u0000HHhh\u0002\u0000VVvv\u0002\u0000AAaa\u0002\u0000LLll\u0002"+ + "\u0000XXxx\u0002\u0000FFff\u0002\u0000MMmm\u0002\u0000GGgg\u0002\u0000"+ + "KKkk\u0002\u0000WWww\u0002\u0000UUuu\u0002\u0000JJjj\u0006\u0000\t\n\r"+ + "\r //[[]]\u0002\u0000\n\n\r\r\u0003\u0000\t\n\r\r \u0001\u000009\u0002"+ + "\u0000AZaz\b\u0000\"\"NNRRTT\\\\nnrrtt\u0004\u0000\n\n\r\r\"\"\\\\\u0002"+ + "\u0000++--\u0001\u0000``\u0002\u0000BBbb\u0002\u0000YYyy\u000b\u0000\t"+ + "\n\r\r \"\",,//::==[[]]||\u0002\u0000**//\u000b\u0000\t\n\r\r \"#,,"+ + "//::<<>?\\\\||\u065c\u0000\u0010\u0001\u0000\u0000\u0000\u0000\u0012\u0001"+ + "\u0000\u0000\u0000\u0000\u0014\u0001\u0000\u0000\u0000\u0000\u0016\u0001"+ + "\u0000\u0000\u0000\u0000\u0018\u0001\u0000\u0000\u0000\u0000\u001a\u0001"+ + "\u0000\u0000\u0000\u0000\u001c\u0001\u0000\u0000\u0000\u0000\u001e\u0001"+ + "\u0000\u0000\u0000\u0000 \u0001\u0000\u0000\u0000\u0000\"\u0001\u0000"+ + "\u0000\u0000\u0000$\u0001\u0000\u0000\u0000\u0000&\u0001\u0000\u0000\u0000"+ + "\u0000(\u0001\u0000\u0000\u0000\u0000*\u0001\u0000\u0000\u0000\u0000,"+ + "\u0001\u0000\u0000\u0000\u0000.\u0001\u0000\u0000\u0000\u00000\u0001\u0000"+ + "\u0000\u0000\u00002\u0001\u0000\u0000\u0000\u00004\u0001\u0000\u0000\u0000"+ + "\u00006\u0001\u0000\u0000\u0000\u00008\u0001\u0000\u0000\u0000\u0000:"+ + "\u0001\u0000\u0000\u0000\u0000<\u0001\u0000\u0000\u0000\u0000>\u0001\u0000"+ + 
"\u0000\u0000\u0000@\u0001\u0000\u0000\u0000\u0000B\u0001\u0000\u0000\u0000"+ + "\u0000D\u0001\u0000\u0000\u0000\u0000F\u0001\u0000\u0000\u0000\u0001H"+ + "\u0001\u0000\u0000\u0000\u0001^\u0001\u0000\u0000\u0000\u0001`\u0001\u0000"+ + "\u0000\u0000\u0001b\u0001\u0000\u0000\u0000\u0001d\u0001\u0000\u0000\u0000"+ + "\u0001f\u0001\u0000\u0000\u0000\u0001h\u0001\u0000\u0000\u0000\u0001j"+ + "\u0001\u0000\u0000\u0000\u0001l\u0001\u0000\u0000\u0000\u0001n\u0001\u0000"+ + "\u0000\u0000\u0001p\u0001\u0000\u0000\u0000\u0001r\u0001\u0000\u0000\u0000"+ + "\u0001t\u0001\u0000\u0000\u0000\u0001v\u0001\u0000\u0000\u0000\u0001x"+ + "\u0001\u0000\u0000\u0000\u0001z\u0001\u0000\u0000\u0000\u0001|\u0001\u0000"+ + "\u0000\u0000\u0001~\u0001\u0000\u0000\u0000\u0001\u0080\u0001\u0000\u0000"+ + "\u0000\u0001\u0082\u0001\u0000\u0000\u0000\u0001\u0084\u0001\u0000\u0000"+ + "\u0000\u0001\u0086\u0001\u0000\u0000\u0000\u0001\u0088\u0001\u0000\u0000"+ + "\u0000\u0001\u008a\u0001\u0000\u0000\u0000\u0001\u008c\u0001\u0000\u0000"+ + "\u0000\u0001\u008e\u0001\u0000\u0000\u0000\u0001\u0090\u0001\u0000\u0000"+ + "\u0000\u0001\u0092\u0001\u0000\u0000\u0000\u0001\u0094\u0001\u0000\u0000"+ + "\u0000\u0001\u0096\u0001\u0000\u0000\u0000\u0001\u0098\u0001\u0000\u0000"+ + "\u0000\u0001\u009a\u0001\u0000\u0000\u0000\u0001\u009c\u0001\u0000\u0000"+ + "\u0000\u0001\u009e\u0001\u0000\u0000\u0000\u0001\u00a0\u0001\u0000\u0000"+ + "\u0000\u0001\u00a2\u0001\u0000\u0000\u0000\u0001\u00a4\u0001\u0000\u0000"+ + "\u0000\u0001\u00a6\u0001\u0000\u0000\u0000\u0001\u00a8\u0001\u0000\u0000"+ + "\u0000\u0001\u00aa\u0001\u0000\u0000\u0000\u0001\u00ac\u0001\u0000\u0000"+ + "\u0000\u0001\u00ae\u0001\u0000\u0000\u0000\u0001\u00b0\u0001\u0000\u0000"+ + "\u0000\u0001\u00b2\u0001\u0000\u0000\u0000\u0001\u00b4\u0001\u0000\u0000"+ + "\u0000\u0001\u00b8\u0001\u0000\u0000\u0000\u0001\u00ba\u0001\u0000\u0000"+ + "\u0000\u0001\u00bc\u0001\u0000\u0000\u0000\u0001\u00be\u0001\u0000\u0000"+ + "\u0000\u0002\u00c0\u0001\u0000\u0000\u0000\u0002\u00c2\u0001\u0000\u0000"+ + "\u0000\u0002\u00c4\u0001\u0000\u0000\u0000\u0002\u00c6\u0001\u0000\u0000"+ + "\u0000\u0002\u00c8\u0001\u0000\u0000\u0000\u0003\u00ca\u0001\u0000\u0000"+ + "\u0000\u0003\u00cc\u0001\u0000\u0000\u0000\u0003\u00ce\u0001\u0000\u0000"+ + "\u0000\u0003\u00d0\u0001\u0000\u0000\u0000\u0003\u00d2\u0001\u0000\u0000"+ + "\u0000\u0003\u00d4\u0001\u0000\u0000\u0000\u0003\u00d6\u0001\u0000\u0000"+ + "\u0000\u0003\u00da\u0001\u0000\u0000\u0000\u0003\u00dc\u0001\u0000\u0000"+ + "\u0000\u0003\u00de\u0001\u0000\u0000\u0000\u0003\u00e0\u0001\u0000\u0000"+ + "\u0000\u0003\u00e2\u0001\u0000\u0000\u0000\u0003\u00e4\u0001\u0000\u0000"+ + "\u0000\u0004\u00e6\u0001\u0000\u0000\u0000\u0004\u00e8\u0001\u0000\u0000"+ + "\u0000\u0004\u00ea\u0001\u0000\u0000\u0000\u0004\u00ec\u0001\u0000\u0000"+ + "\u0000\u0004\u00ee\u0001\u0000\u0000\u0000\u0004\u00f4\u0001\u0000\u0000"+ + "\u0000\u0004\u00f6\u0001\u0000\u0000\u0000\u0004\u00f8\u0001\u0000\u0000"+ + "\u0000\u0004\u00fa\u0001\u0000\u0000\u0000\u0005\u00fc\u0001\u0000\u0000"+ + "\u0000\u0005\u00fe\u0001\u0000\u0000\u0000\u0005\u0100\u0001\u0000\u0000"+ + "\u0000\u0005\u0102\u0001\u0000\u0000\u0000\u0005\u0104\u0001\u0000\u0000"+ + "\u0000\u0005\u0106\u0001\u0000\u0000\u0000\u0005\u0108\u0001\u0000\u0000"+ + "\u0000\u0005\u010a\u0001\u0000\u0000\u0000\u0005\u010c\u0001\u0000\u0000"+ + "\u0000\u0005\u010e\u0001\u0000\u0000\u0000\u0005\u0110\u0001\u0000\u0000"+ + "\u0000\u0006\u0112\u0001\u0000\u0000\u0000\u0006\u0114\u0001\u0000\u0000"+ + 
"\u0000\u0006\u0116\u0001\u0000\u0000\u0000\u0006\u0118\u0001\u0000\u0000"+ + "\u0000\u0006\u011c\u0001\u0000\u0000\u0000\u0006\u011e\u0001\u0000\u0000"+ + "\u0000\u0006\u0120\u0001\u0000\u0000\u0000\u0006\u0122\u0001\u0000\u0000"+ + "\u0000\u0006\u0124\u0001\u0000\u0000\u0000\u0007\u0126\u0001\u0000\u0000"+ + "\u0000\u0007\u0128\u0001\u0000\u0000\u0000\u0007\u012a\u0001\u0000\u0000"+ + "\u0000\u0007\u012c\u0001\u0000\u0000\u0000\u0007\u012e\u0001\u0000\u0000"+ + "\u0000\u0007\u0130\u0001\u0000\u0000\u0000\u0007\u0132\u0001\u0000\u0000"+ + "\u0000\u0007\u0134\u0001\u0000\u0000\u0000\u0007\u0136\u0001\u0000\u0000"+ + "\u0000\u0007\u0138\u0001\u0000\u0000\u0000\u0007\u013a\u0001\u0000\u0000"+ + "\u0000\u0007\u013c\u0001\u0000\u0000\u0000\b\u013e\u0001\u0000\u0000\u0000"+ + "\b\u0140\u0001\u0000\u0000\u0000\b\u0142\u0001\u0000\u0000\u0000\b\u0144"+ + "\u0001\u0000\u0000\u0000\b\u0146\u0001\u0000\u0000\u0000\b\u0148\u0001"+ + "\u0000\u0000\u0000\b\u014a\u0001\u0000\u0000\u0000\b\u014c\u0001\u0000"+ + "\u0000\u0000\b\u014e\u0001\u0000\u0000\u0000\t\u0150\u0001\u0000\u0000"+ + "\u0000\t\u0152\u0001\u0000\u0000\u0000\t\u0154\u0001\u0000\u0000\u0000"+ + "\t\u0156\u0001\u0000\u0000\u0000\t\u0158\u0001\u0000\u0000\u0000\n\u015a"+ + "\u0001\u0000\u0000\u0000\n\u015c\u0001\u0000\u0000\u0000\n\u015e\u0001"+ + "\u0000\u0000\u0000\n\u0160\u0001\u0000\u0000\u0000\n\u0162\u0001\u0000"+ + "\u0000\u0000\n\u0164\u0001\u0000\u0000\u0000\u000b\u0166\u0001\u0000\u0000"+ + "\u0000\u000b\u0168\u0001\u0000\u0000\u0000\u000b\u016a\u0001\u0000\u0000"+ + "\u0000\u000b\u016c\u0001\u0000\u0000\u0000\u000b\u016e\u0001\u0000\u0000"+ + "\u0000\u000b\u0170\u0001\u0000\u0000\u0000\u000b\u0172\u0001\u0000\u0000"+ + "\u0000\u000b\u0174\u0001\u0000\u0000\u0000\u000b\u0176\u0001\u0000\u0000"+ + "\u0000\u000b\u0178\u0001\u0000\u0000\u0000\f\u017a\u0001\u0000\u0000\u0000"+ + "\f\u017c\u0001\u0000\u0000\u0000\f\u017e\u0001\u0000\u0000\u0000\f\u0180"+ + "\u0001\u0000\u0000\u0000\f\u0182\u0001\u0000\u0000\u0000\f\u0184\u0001"+ + "\u0000\u0000\u0000\f\u0186\u0001\u0000\u0000\u0000\r\u0188\u0001\u0000"+ + "\u0000\u0000\r\u018a\u0001\u0000\u0000\u0000\r\u018c\u0001\u0000\u0000"+ + "\u0000\r\u018e\u0001\u0000\u0000\u0000\r\u0190\u0001\u0000\u0000\u0000"+ + "\r\u0192\u0001\u0000\u0000\u0000\r\u0194\u0001\u0000\u0000\u0000\r\u0196"+ + "\u0001\u0000\u0000\u0000\r\u0198\u0001\u0000\u0000\u0000\r\u019a\u0001"+ + "\u0000\u0000\u0000\u000e\u019c\u0001\u0000\u0000\u0000\u000e\u019e\u0001"+ + "\u0000\u0000\u0000\u000e\u01a0\u0001\u0000\u0000\u0000\u000e\u01a2\u0001"+ + "\u0000\u0000\u0000\u000e\u01a4\u0001\u0000\u0000\u0000\u000e\u01a6\u0001"+ + "\u0000\u0000\u0000\u000f\u01a8\u0001\u0000\u0000\u0000\u000f\u01aa\u0001"+ + "\u0000\u0000\u0000\u000f\u01ac\u0001\u0000\u0000\u0000\u000f\u01ae\u0001"+ + "\u0000\u0000\u0000\u000f\u01b0\u0001\u0000\u0000\u0000\u000f\u01b2\u0001"+ + "\u0000\u0000\u0000\u000f\u01b4\u0001\u0000\u0000\u0000\u000f\u01b6\u0001"+ + "\u0000\u0000\u0000\u000f\u01b8\u0001\u0000\u0000\u0000\u0010\u01ba\u0001"+ + "\u0000\u0000\u0000\u0012\u01c4\u0001\u0000\u0000\u0000\u0014\u01cb\u0001"+ + "\u0000\u0000\u0000\u0016\u01d4\u0001\u0000\u0000\u0000\u0018\u01db\u0001"+ + "\u0000\u0000\u0000\u001a\u01e5\u0001\u0000\u0000\u0000\u001c\u01ec\u0001"+ + "\u0000\u0000\u0000\u001e\u01f3\u0001\u0000\u0000\u0000 \u01fa\u0001\u0000"+ + "\u0000\u0000\"\u0202\u0001\u0000\u0000\u0000$\u020e\u0001\u0000\u0000"+ + "\u0000&\u0217\u0001\u0000\u0000\u0000(\u021d\u0001\u0000\u0000\u0000*"+ + 
"\u0224\u0001\u0000\u0000\u0000,\u022b\u0001\u0000\u0000\u0000.\u0233\u0001"+ + "\u0000\u0000\u00000\u023b\u0001\u0000\u0000\u00002\u024a\u0001\u0000\u0000"+ + "\u00004\u0256\u0001\u0000\u0000\u00006\u0261\u0001\u0000\u0000\u00008"+ + "\u0269\u0001\u0000\u0000\u0000:\u0271\u0001\u0000\u0000\u0000<\u0279\u0001"+ + "\u0000\u0000\u0000>\u0282\u0001\u0000\u0000\u0000@\u028d\u0001\u0000\u0000"+ + "\u0000B\u0293\u0001\u0000\u0000\u0000D\u02a4\u0001\u0000\u0000\u0000F"+ + "\u02b4\u0001\u0000\u0000\u0000H\u02ba\u0001\u0000\u0000\u0000J\u02be\u0001"+ + "\u0000\u0000\u0000L\u02c0\u0001\u0000\u0000\u0000N\u02c2\u0001\u0000\u0000"+ + "\u0000P\u02c5\u0001\u0000\u0000\u0000R\u02c7\u0001\u0000\u0000\u0000T"+ + "\u02d0\u0001\u0000\u0000\u0000V\u02d2\u0001\u0000\u0000\u0000X\u02d7\u0001"+ + "\u0000\u0000\u0000Z\u02d9\u0001\u0000\u0000\u0000\\\u02de\u0001\u0000"+ + "\u0000\u0000^\u02fd\u0001\u0000\u0000\u0000`\u0300\u0001\u0000\u0000\u0000"+ + "b\u032e\u0001\u0000\u0000\u0000d\u0330\u0001\u0000\u0000\u0000f\u0333"+ + "\u0001\u0000\u0000\u0000h\u0337\u0001\u0000\u0000\u0000j\u033b\u0001\u0000"+ + "\u0000\u0000l\u033d\u0001\u0000\u0000\u0000n\u0340\u0001\u0000\u0000\u0000"+ + "p\u0342\u0001\u0000\u0000\u0000r\u0344\u0001\u0000\u0000\u0000t\u0349"+ + "\u0001\u0000\u0000\u0000v\u034b\u0001\u0000\u0000\u0000x\u0351\u0001\u0000"+ + "\u0000\u0000z\u0357\u0001\u0000\u0000\u0000|\u035a\u0001\u0000\u0000\u0000"+ + "~\u035d\u0001\u0000\u0000\u0000\u0080\u0362\u0001\u0000\u0000\u0000\u0082"+ + "\u0367\u0001\u0000\u0000\u0000\u0084\u0369\u0001\u0000\u0000\u0000\u0086"+ + "\u036d\u0001\u0000\u0000\u0000\u0088\u0372\u0001\u0000\u0000\u0000\u008a"+ + "\u0378\u0001\u0000\u0000\u0000\u008c\u037b\u0001\u0000\u0000\u0000\u008e"+ + "\u037d\u0001\u0000\u0000\u0000\u0090\u0383\u0001\u0000\u0000\u0000\u0092"+ + "\u0385\u0001\u0000\u0000\u0000\u0094\u038a\u0001\u0000\u0000\u0000\u0096"+ + "\u038d\u0001\u0000\u0000\u0000\u0098\u0390\u0001\u0000\u0000\u0000\u009a"+ + "\u0393\u0001\u0000\u0000\u0000\u009c\u0395\u0001\u0000\u0000\u0000\u009e"+ + "\u0398\u0001\u0000\u0000\u0000\u00a0\u039a\u0001\u0000\u0000\u0000\u00a2"+ + "\u039d\u0001\u0000\u0000\u0000\u00a4\u039f\u0001\u0000\u0000\u0000\u00a6"+ + "\u03a1\u0001\u0000\u0000\u0000\u00a8\u03a3\u0001\u0000\u0000\u0000\u00aa"+ + "\u03a5\u0001\u0000\u0000\u0000\u00ac\u03a7\u0001\u0000\u0000\u0000\u00ae"+ + "\u03bc\u0001\u0000\u0000\u0000\u00b0\u03be\u0001\u0000\u0000\u0000\u00b2"+ + "\u03c3\u0001\u0000\u0000\u0000\u00b4\u03d8\u0001\u0000\u0000\u0000\u00b6"+ + "\u03da\u0001\u0000\u0000\u0000\u00b8\u03e2\u0001\u0000\u0000\u0000\u00ba"+ + "\u03e4\u0001\u0000\u0000\u0000\u00bc\u03e8\u0001\u0000\u0000\u0000\u00be"+ + "\u03ec\u0001\u0000\u0000\u0000\u00c0\u03f0\u0001\u0000\u0000\u0000\u00c2"+ + "\u03f5\u0001\u0000\u0000\u0000\u00c4\u03fa\u0001\u0000\u0000\u0000\u00c6"+ + "\u03fe\u0001\u0000\u0000\u0000\u00c8\u0402\u0001\u0000\u0000\u0000\u00ca"+ + "\u0406\u0001\u0000\u0000\u0000\u00cc\u040b\u0001\u0000\u0000\u0000\u00ce"+ + "\u040f\u0001\u0000\u0000\u0000\u00d0\u0413\u0001\u0000\u0000\u0000\u00d2"+ + "\u0417\u0001\u0000\u0000\u0000\u00d4\u041b\u0001\u0000\u0000\u0000\u00d6"+ + "\u041f\u0001\u0000\u0000\u0000\u00d8\u042b\u0001\u0000\u0000\u0000\u00da"+ + "\u042e\u0001\u0000\u0000\u0000\u00dc\u0432\u0001\u0000\u0000\u0000\u00de"+ + "\u0436\u0001\u0000\u0000\u0000\u00e0\u043a\u0001\u0000\u0000\u0000\u00e2"+ + "\u043e\u0001\u0000\u0000\u0000\u00e4\u0442\u0001\u0000\u0000\u0000\u00e6"+ + "\u0446\u0001\u0000\u0000\u0000\u00e8\u044b\u0001\u0000\u0000\u0000\u00ea"+ + 
"\u044f\u0001\u0000\u0000\u0000\u00ec\u0453\u0001\u0000\u0000\u0000\u00ee"+ + "\u0458\u0001\u0000\u0000\u0000\u00f0\u0461\u0001\u0000\u0000\u0000\u00f2"+ + "\u0476\u0001\u0000\u0000\u0000\u00f4\u047a\u0001\u0000\u0000\u0000\u00f6"+ + "\u047e\u0001\u0000\u0000\u0000\u00f8\u0482\u0001\u0000\u0000\u0000\u00fa"+ + "\u0486\u0001\u0000\u0000\u0000\u00fc\u048a\u0001\u0000\u0000\u0000\u00fe"+ + "\u048f\u0001\u0000\u0000\u0000\u0100\u0493\u0001\u0000\u0000\u0000\u0102"+ + "\u0497\u0001\u0000\u0000\u0000\u0104\u049b\u0001\u0000\u0000\u0000\u0106"+ + "\u04a0\u0001\u0000\u0000\u0000\u0108\u04a5\u0001\u0000\u0000\u0000\u010a"+ + "\u04a8\u0001\u0000\u0000\u0000\u010c\u04ac\u0001\u0000\u0000\u0000\u010e"+ + "\u04b0\u0001\u0000\u0000\u0000\u0110\u04b4\u0001\u0000\u0000\u0000\u0112"+ + "\u04b8\u0001\u0000\u0000\u0000\u0114\u04bd\u0001\u0000\u0000\u0000\u0116"+ + "\u04c2\u0001\u0000\u0000\u0000\u0118\u04c7\u0001\u0000\u0000\u0000\u011a"+ + "\u04ce\u0001\u0000\u0000\u0000\u011c\u04d7\u0001\u0000\u0000\u0000\u011e"+ + "\u04de\u0001\u0000\u0000\u0000\u0120\u04e2\u0001\u0000\u0000\u0000\u0122"+ + "\u04e6\u0001\u0000\u0000\u0000\u0124\u04ea\u0001\u0000\u0000\u0000\u0126"+ + "\u04ee\u0001\u0000\u0000\u0000\u0128\u04f4\u0001\u0000\u0000\u0000\u012a"+ + "\u04f8\u0001\u0000\u0000\u0000\u012c\u04fc\u0001\u0000\u0000\u0000\u012e"+ + "\u0500\u0001\u0000\u0000\u0000\u0130\u0504\u0001\u0000\u0000\u0000\u0132"+ + "\u0508\u0001\u0000\u0000\u0000\u0134\u050c\u0001\u0000\u0000\u0000\u0136"+ + "\u0511\u0001\u0000\u0000\u0000\u0138\u0516\u0001\u0000\u0000\u0000\u013a"+ + "\u051a\u0001\u0000\u0000\u0000\u013c\u051e\u0001\u0000\u0000\u0000\u013e"+ + "\u0522\u0001\u0000\u0000\u0000\u0140\u0527\u0001\u0000\u0000\u0000\u0142"+ + "\u052b\u0001\u0000\u0000\u0000\u0144\u0530\u0001\u0000\u0000\u0000\u0146"+ + "\u0535\u0001\u0000\u0000\u0000\u0148\u0539\u0001\u0000\u0000\u0000\u014a"+ + "\u053d\u0001\u0000\u0000\u0000\u014c\u0541\u0001\u0000\u0000\u0000\u014e"+ + "\u0545\u0001\u0000\u0000\u0000\u0150\u0549\u0001\u0000\u0000\u0000\u0152"+ + "\u054e\u0001\u0000\u0000\u0000\u0154\u0553\u0001\u0000\u0000\u0000\u0156"+ + "\u0557\u0001\u0000\u0000\u0000\u0158\u055b\u0001\u0000\u0000\u0000\u015a"+ + "\u055f\u0001\u0000\u0000\u0000\u015c\u0564\u0001\u0000\u0000\u0000\u015e"+ + "\u056d\u0001\u0000\u0000\u0000\u0160\u0571\u0001\u0000\u0000\u0000\u0162"+ + "\u0575\u0001\u0000\u0000\u0000\u0164\u0579\u0001\u0000\u0000\u0000\u0166"+ + "\u057d\u0001\u0000\u0000\u0000\u0168\u0582\u0001\u0000\u0000\u0000\u016a"+ + "\u0586\u0001\u0000\u0000\u0000\u016c\u058a\u0001\u0000\u0000\u0000\u016e"+ + "\u058e\u0001\u0000\u0000\u0000\u0170\u0593\u0001\u0000\u0000\u0000\u0172"+ + "\u0597\u0001\u0000\u0000\u0000\u0174\u059b\u0001\u0000\u0000\u0000\u0176"+ + "\u059f\u0001\u0000\u0000\u0000\u0178\u05a3\u0001\u0000\u0000\u0000\u017a"+ + "\u05a7\u0001\u0000\u0000\u0000\u017c\u05ad\u0001\u0000\u0000\u0000\u017e"+ + "\u05b1\u0001\u0000\u0000\u0000\u0180\u05b5\u0001\u0000\u0000\u0000\u0182"+ + "\u05b9\u0001\u0000\u0000\u0000\u0184\u05bd\u0001\u0000\u0000\u0000\u0186"+ + "\u05c1\u0001\u0000\u0000\u0000\u0188\u05c5\u0001\u0000\u0000\u0000\u018a"+ + "\u05ca\u0001\u0000\u0000\u0000\u018c\u05ce\u0001\u0000\u0000\u0000\u018e"+ + "\u05d2\u0001\u0000\u0000\u0000\u0190\u05d8\u0001\u0000\u0000\u0000\u0192"+ + "\u05e1\u0001\u0000\u0000\u0000\u0194\u05e5\u0001\u0000\u0000\u0000\u0196"+ + "\u05e9\u0001\u0000\u0000\u0000\u0198\u05ed\u0001\u0000\u0000\u0000\u019a"+ + "\u05f1\u0001\u0000\u0000\u0000\u019c\u05f5\u0001\u0000\u0000\u0000\u019e"+ + 
"\u05fa\u0001\u0000\u0000\u0000\u01a0\u0600\u0001\u0000\u0000\u0000\u01a2"+ + "\u0606\u0001\u0000\u0000\u0000\u01a4\u060a\u0001\u0000\u0000\u0000\u01a6"+ + "\u060e\u0001\u0000\u0000\u0000\u01a8\u0612\u0001\u0000\u0000\u0000\u01aa"+ + "\u0618\u0001\u0000\u0000\u0000\u01ac\u061e\u0001\u0000\u0000\u0000\u01ae"+ + "\u0622\u0001\u0000\u0000\u0000\u01b0\u0626\u0001\u0000\u0000\u0000\u01b2"+ + "\u062a\u0001\u0000\u0000\u0000\u01b4\u0630\u0001\u0000\u0000\u0000\u01b6"+ + "\u0636\u0001\u0000\u0000\u0000\u01b8\u063c\u0001\u0000\u0000\u0000\u01ba"+ + "\u01bb\u0007\u0000\u0000\u0000\u01bb\u01bc\u0007\u0001\u0000\u0000\u01bc"+ + "\u01bd\u0007\u0002\u0000\u0000\u01bd\u01be\u0007\u0002\u0000\u0000\u01be"+ + "\u01bf\u0007\u0003\u0000\u0000\u01bf\u01c0\u0007\u0004\u0000\u0000\u01c0"+ + "\u01c1\u0007\u0005\u0000\u0000\u01c1\u01c2\u0001\u0000\u0000\u0000\u01c2"+ + "\u01c3\u0006\u0000\u0000\u0000\u01c3\u0011\u0001\u0000\u0000\u0000\u01c4"+ + "\u01c5\u0007\u0000\u0000\u0000\u01c5\u01c6\u0007\u0006\u0000\u0000\u01c6"+ + "\u01c7\u0007\u0007\u0000\u0000\u01c7\u01c8\u0007\b\u0000\u0000\u01c8\u01c9"+ + "\u0001\u0000\u0000\u0000\u01c9\u01ca\u0006\u0001\u0001\u0000\u01ca\u0013"+ + "\u0001\u0000\u0000\u0000\u01cb\u01cc\u0007\u0003\u0000\u0000\u01cc\u01cd"+ + "\u0007\t\u0000\u0000\u01cd\u01ce\u0007\u0006\u0000\u0000\u01ce\u01cf\u0007"+ + "\u0001\u0000\u0000\u01cf\u01d0\u0007\u0004\u0000\u0000\u01d0\u01d1\u0007"+ + "\n\u0000\u0000\u01d1\u01d2\u0001\u0000\u0000\u0000\u01d2\u01d3\u0006\u0002"+ + "\u0002\u0000\u01d3\u0015\u0001\u0000\u0000\u0000\u01d4\u01d5\u0007\u0003"+ + "\u0000\u0000\u01d5\u01d6\u0007\u000b\u0000\u0000\u01d6\u01d7\u0007\f\u0000"+ + "\u0000\u01d7\u01d8\u0007\r\u0000\u0000\u01d8\u01d9\u0001\u0000\u0000\u0000"+ + "\u01d9\u01da\u0006\u0003\u0000\u0000\u01da\u0017\u0001\u0000\u0000\u0000"+ + "\u01db\u01dc\u0007\u0003\u0000\u0000\u01dc\u01dd\u0007\u000e\u0000\u0000"+ + "\u01dd\u01de\u0007\b\u0000\u0000\u01de\u01df\u0007\r\u0000\u0000\u01df"+ + "\u01e0\u0007\f\u0000\u0000\u01e0\u01e1\u0007\u0001\u0000\u0000\u01e1\u01e2"+ + "\u0007\t\u0000\u0000\u01e2\u01e3\u0001\u0000\u0000\u0000\u01e3\u01e4\u0006"+ + "\u0004\u0003\u0000\u01e4\u0019\u0001\u0000\u0000\u0000\u01e5\u01e6\u0007"+ + "\u000f\u0000\u0000\u01e6\u01e7\u0007\u0006\u0000\u0000\u01e7\u01e8\u0007"+ + "\u0007\u0000\u0000\u01e8\u01e9\u0007\u0010\u0000\u0000\u01e9\u01ea\u0001"+ + "\u0000\u0000\u0000\u01ea\u01eb\u0006\u0005\u0004\u0000\u01eb\u001b\u0001"+ + "\u0000\u0000\u0000\u01ec\u01ed\u0007\u0011\u0000\u0000\u01ed\u01ee\u0007"+ + "\u0006\u0000\u0000\u01ee\u01ef\u0007\u0007\u0000\u0000\u01ef\u01f0\u0007"+ + "\u0012\u0000\u0000\u01f0\u01f1\u0001\u0000\u0000\u0000\u01f1\u01f2\u0006"+ + "\u0006\u0000\u0000\u01f2\u001d\u0001\u0000\u0000\u0000\u01f3\u01f4\u0007"+ + "\u0012\u0000\u0000\u01f4\u01f5\u0007\u0003\u0000\u0000\u01f5\u01f6\u0007"+ + "\u0003\u0000\u0000\u01f6\u01f7\u0007\b\u0000\u0000\u01f7\u01f8\u0001\u0000"+ + "\u0000\u0000\u01f8\u01f9\u0006\u0007\u0001\u0000\u01f9\u001f\u0001\u0000"+ + "\u0000\u0000\u01fa\u01fb\u0007\r\u0000\u0000\u01fb\u01fc\u0007\u0001\u0000"+ + "\u0000\u01fc\u01fd\u0007\u0010\u0000\u0000\u01fd\u01fe\u0007\u0001\u0000"+ + "\u0000\u01fe\u01ff\u0007\u0005\u0000\u0000\u01ff\u0200\u0001\u0000\u0000"+ + "\u0000\u0200\u0201\u0006\b\u0000\u0000\u0201!\u0001\u0000\u0000\u0000"+ + "\u0202\u0203\u0007\u0010\u0000\u0000\u0203\u0204\u0007\u000b\u0000\u0000"+ + "\u0204\u0205\u0005_\u0000\u0000\u0205\u0206\u0007\u0003\u0000\u0000\u0206"+ + "\u0207\u0007\u000e\u0000\u0000\u0207\u0208\u0007\b\u0000\u0000\u0208\u0209"+ + 
"\u0007\f\u0000\u0000\u0209\u020a\u0007\t\u0000\u0000\u020a\u020b\u0007"+ + "\u0000\u0000\u0000\u020b\u020c\u0001\u0000\u0000\u0000\u020c\u020d\u0006"+ + "\t\u0005\u0000\u020d#\u0001\u0000\u0000\u0000\u020e\u020f\u0007\u0006"+ + "\u0000\u0000\u020f\u0210\u0007\u0003\u0000\u0000\u0210\u0211\u0007\t\u0000"+ + "\u0000\u0211\u0212\u0007\f\u0000\u0000\u0212\u0213\u0007\u0010\u0000\u0000"+ + "\u0213\u0214\u0007\u0003\u0000\u0000\u0214\u0215\u0001\u0000\u0000\u0000"+ + "\u0215\u0216\u0006\n\u0006\u0000\u0216%\u0001\u0000\u0000\u0000\u0217"+ + "\u0218\u0007\u0006\u0000\u0000\u0218\u0219\u0007\u0007\u0000\u0000\u0219"+ + "\u021a\u0007\u0013\u0000\u0000\u021a\u021b\u0001\u0000\u0000\u0000\u021b"+ + "\u021c\u0006\u000b\u0000\u0000\u021c\'\u0001\u0000\u0000\u0000\u021d\u021e"+ + "\u0007\u0002\u0000\u0000\u021e\u021f\u0007\n\u0000\u0000\u021f\u0220\u0007"+ + "\u0007\u0000\u0000\u0220\u0221\u0007\u0013\u0000\u0000\u0221\u0222\u0001"+ + "\u0000\u0000\u0000\u0222\u0223\u0006\f\u0007\u0000\u0223)\u0001\u0000"+ + "\u0000\u0000\u0224\u0225\u0007\u0002\u0000\u0000\u0225\u0226\u0007\u0007"+ + "\u0000\u0000\u0226\u0227\u0007\u0006\u0000\u0000\u0227\u0228\u0007\u0005"+ + "\u0000\u0000\u0228\u0229\u0001\u0000\u0000\u0000\u0229\u022a\u0006\r\u0000"+ + "\u0000\u022a+\u0001\u0000\u0000\u0000\u022b\u022c\u0007\u0002\u0000\u0000"+ + "\u022c\u022d\u0007\u0005\u0000\u0000\u022d\u022e\u0007\f\u0000\u0000\u022e"+ + "\u022f\u0007\u0005\u0000\u0000\u022f\u0230\u0007\u0002\u0000\u0000\u0230"+ + "\u0231\u0001\u0000\u0000\u0000\u0231\u0232\u0006\u000e\u0000\u0000\u0232"+ + "-\u0001\u0000\u0000\u0000\u0233\u0234\u0007\u0013\u0000\u0000\u0234\u0235"+ + "\u0007\n\u0000\u0000\u0235\u0236\u0007\u0003\u0000\u0000\u0236\u0237\u0007"+ + "\u0006\u0000\u0000\u0237\u0238\u0007\u0003\u0000\u0000\u0238\u0239\u0001"+ + "\u0000\u0000\u0000\u0239\u023a\u0006\u000f\u0000\u0000\u023a/\u0001\u0000"+ + "\u0000\u0000\u023b\u023c\u0004\u0010\u0000\u0000\u023c\u023d\u0007\u0001"+ + "\u0000\u0000\u023d\u023e\u0007\t\u0000\u0000\u023e\u023f\u0007\r\u0000"+ + "\u0000\u023f\u0240\u0007\u0001\u0000\u0000\u0240\u0241\u0007\t\u0000\u0000"+ + "\u0241\u0242\u0007\u0003\u0000\u0000\u0242\u0243\u0007\u0002\u0000\u0000"+ + "\u0243\u0244\u0007\u0005\u0000\u0000\u0244\u0245\u0007\f\u0000\u0000\u0245"+ + "\u0246\u0007\u0005\u0000\u0000\u0246\u0247\u0007\u0002\u0000\u0000\u0247"+ + "\u0248\u0001\u0000\u0000\u0000\u0248\u0249\u0006\u0010\u0000\u0000\u0249"+ + "1\u0001\u0000\u0000\u0000\u024a\u024b\u0004\u0011\u0001\u0000\u024b\u024c"+ + "\u0007\r\u0000\u0000\u024c\u024d\u0007\u0007\u0000\u0000\u024d\u024e\u0007"+ + "\u0007\u0000\u0000\u024e\u024f\u0007\u0012\u0000\u0000\u024f\u0250\u0007"+ + "\u0014\u0000\u0000\u0250\u0251\u0007\b\u0000\u0000\u0251\u0252\u0005_"+ + "\u0000\u0000\u0252\u0253\u0005\u8001\uf414\u0000\u0000\u0253\u0254\u0001"+ + "\u0000\u0000\u0000\u0254\u0255\u0006\u0011\b\u0000\u02553\u0001\u0000"+ + "\u0000\u0000\u0256\u0257\u0004\u0012\u0002\u0000\u0257\u0258\u0007\u0010"+ + "\u0000\u0000\u0258\u0259\u0007\u0003\u0000\u0000\u0259\u025a\u0007\u0005"+ + "\u0000\u0000\u025a\u025b\u0007\u0006\u0000\u0000\u025b\u025c\u0007\u0001"+ + "\u0000\u0000\u025c\u025d\u0007\u0004\u0000\u0000\u025d\u025e\u0007\u0002"+ + "\u0000\u0000\u025e\u025f\u0001\u0000\u0000\u0000\u025f\u0260\u0006\u0012"+ + "\t\u0000\u02605\u0001\u0000\u0000\u0000\u0261\u0262\u0004\u0013\u0003"+ + "\u0000\u0262\u0263\u0007\u0015\u0000\u0000\u0263\u0264\u0007\u0007\u0000"+ + "\u0000\u0264\u0265\u0007\u0001\u0000\u0000\u0265\u0266\u0007\t\u0000\u0000"+ + 
"\u0266\u0267\u0001\u0000\u0000\u0000\u0267\u0268\u0006\u0013\n\u0000\u0268"+ + "7\u0001\u0000\u0000\u0000\u0269\u026a\u0004\u0014\u0004\u0000\u026a\u026b"+ + "\u0007\u000f\u0000\u0000\u026b\u026c\u0007\u0014\u0000\u0000\u026c\u026d"+ + "\u0007\r\u0000\u0000\u026d\u026e\u0007\r\u0000\u0000\u026e\u026f\u0001"+ + "\u0000\u0000\u0000\u026f\u0270\u0006\u0014\n\u0000\u02709\u0001\u0000"+ + "\u0000\u0000\u0271\u0272\u0004\u0015\u0005\u0000\u0272\u0273\u0007\r\u0000"+ + "\u0000\u0273\u0274\u0007\u0003\u0000\u0000\u0274\u0275\u0007\u000f\u0000"+ + "\u0000\u0275\u0276\u0007\u0005\u0000\u0000\u0276\u0277\u0001\u0000\u0000"+ + "\u0000\u0277\u0278\u0006\u0015\n\u0000\u0278;\u0001\u0000\u0000\u0000"+ + "\u0279\u027a\u0004\u0016\u0006\u0000\u027a\u027b\u0007\u0006\u0000\u0000"+ + "\u027b\u027c\u0007\u0001\u0000\u0000\u027c\u027d\u0007\u0011\u0000\u0000"+ + "\u027d\u027e\u0007\n\u0000\u0000\u027e\u027f\u0007\u0005\u0000\u0000\u027f"+ + "\u0280\u0001\u0000\u0000\u0000\u0280\u0281\u0006\u0016\n\u0000\u0281="+ + "\u0001\u0000\u0000\u0000\u0282\u0283\u0004\u0017\u0007\u0000\u0283\u0284"+ + "\u0007\r\u0000\u0000\u0284\u0285\u0007\u0007\u0000\u0000\u0285\u0286\u0007"+ + "\u0007\u0000\u0000\u0286\u0287\u0007\u0012\u0000\u0000\u0287\u0288\u0007"+ + "\u0014\u0000\u0000\u0288\u0289\u0007\b\u0000\u0000\u0289\u028a\u0001\u0000"+ + "\u0000\u0000\u028a\u028b\u0006\u0017\n\u0000\u028b?\u0001\u0000\u0000"+ + "\u0000\u028c\u028e\b\u0016\u0000\u0000\u028d\u028c\u0001\u0000\u0000\u0000"+ + "\u028e\u028f\u0001\u0000\u0000\u0000\u028f\u028d\u0001\u0000\u0000\u0000"+ + "\u028f\u0290\u0001\u0000\u0000\u0000\u0290\u0291\u0001\u0000\u0000\u0000"+ + "\u0291\u0292\u0006\u0018\u0000\u0000\u0292A\u0001\u0000\u0000\u0000\u0293"+ + "\u0294\u0005/\u0000\u0000\u0294\u0295\u0005/\u0000\u0000\u0295\u0299\u0001"+ + "\u0000\u0000\u0000\u0296\u0298\b\u0017\u0000\u0000\u0297\u0296\u0001\u0000"+ + "\u0000\u0000\u0298\u029b\u0001\u0000\u0000\u0000\u0299\u0297\u0001\u0000"+ + "\u0000\u0000\u0299\u029a\u0001\u0000\u0000\u0000\u029a\u029d\u0001\u0000"+ + "\u0000\u0000\u029b\u0299\u0001\u0000\u0000\u0000\u029c\u029e\u0005\r\u0000"+ + "\u0000\u029d\u029c\u0001\u0000\u0000\u0000\u029d\u029e\u0001\u0000\u0000"+ + "\u0000\u029e\u02a0\u0001\u0000\u0000\u0000\u029f\u02a1\u0005\n\u0000\u0000"+ + "\u02a0\u029f\u0001\u0000\u0000\u0000\u02a0\u02a1\u0001\u0000\u0000\u0000"+ + "\u02a1\u02a2\u0001\u0000\u0000\u0000\u02a2\u02a3\u0006\u0019\u000b\u0000"+ + "\u02a3C\u0001\u0000\u0000\u0000\u02a4\u02a5\u0005/\u0000\u0000\u02a5\u02a6"+ + "\u0005*\u0000\u0000\u02a6\u02ab\u0001\u0000\u0000\u0000\u02a7\u02aa\u0003"+ + "D\u001a\u0000\u02a8\u02aa\t\u0000\u0000\u0000\u02a9\u02a7\u0001\u0000"+ + "\u0000\u0000\u02a9\u02a8\u0001\u0000\u0000\u0000\u02aa\u02ad\u0001\u0000"+ + "\u0000\u0000\u02ab\u02ac\u0001\u0000\u0000\u0000\u02ab\u02a9\u0001\u0000"+ + "\u0000\u0000\u02ac\u02ae\u0001\u0000\u0000\u0000\u02ad\u02ab\u0001\u0000"+ + "\u0000\u0000\u02ae\u02af\u0005*\u0000\u0000\u02af\u02b0\u0005/\u0000\u0000"+ + "\u02b0\u02b1\u0001\u0000\u0000\u0000\u02b1\u02b2\u0006\u001a\u000b\u0000"+ + "\u02b2E\u0001\u0000\u0000\u0000\u02b3\u02b5\u0007\u0018\u0000\u0000\u02b4"+ + "\u02b3\u0001\u0000\u0000\u0000\u02b5\u02b6\u0001\u0000\u0000\u0000\u02b6"+ + "\u02b4\u0001\u0000\u0000\u0000\u02b6\u02b7\u0001\u0000\u0000\u0000\u02b7"+ + "\u02b8\u0001\u0000\u0000\u0000\u02b8\u02b9\u0006\u001b\u000b\u0000\u02b9"+ + "G\u0001\u0000\u0000\u0000\u02ba\u02bb\u0005|\u0000\u0000\u02bb\u02bc\u0001"+ + "\u0000\u0000\u0000\u02bc\u02bd\u0006\u001c\f\u0000\u02bdI\u0001\u0000"+ + 
"\u0000\u0000\u02be\u02bf\u0007\u0019\u0000\u0000\u02bfK\u0001\u0000\u0000"+ + "\u0000\u02c0\u02c1\u0007\u001a\u0000\u0000\u02c1M\u0001\u0000\u0000\u0000"+ + "\u02c2\u02c3\u0005\\\u0000\u0000\u02c3\u02c4\u0007\u001b\u0000\u0000\u02c4"+ + "O\u0001\u0000\u0000\u0000\u02c5\u02c6\b\u001c\u0000\u0000\u02c6Q\u0001"+ + "\u0000\u0000\u0000\u02c7\u02c9\u0007\u0003\u0000\u0000\u02c8\u02ca\u0007"+ + "\u001d\u0000\u0000\u02c9\u02c8\u0001\u0000\u0000\u0000\u02c9\u02ca\u0001"+ + "\u0000\u0000\u0000\u02ca\u02cc\u0001\u0000\u0000\u0000\u02cb\u02cd\u0003"+ + "J\u001d\u0000\u02cc\u02cb\u0001\u0000\u0000\u0000\u02cd\u02ce\u0001\u0000"+ + "\u0000\u0000\u02ce\u02cc\u0001\u0000\u0000\u0000\u02ce\u02cf\u0001\u0000"+ + "\u0000\u0000\u02cfS\u0001\u0000\u0000\u0000\u02d0\u02d1\u0005@\u0000\u0000"+ + "\u02d1U\u0001\u0000\u0000\u0000\u02d2\u02d3\u0005`\u0000\u0000\u02d3W"+ + "\u0001\u0000\u0000\u0000\u02d4\u02d8\b\u001e\u0000\u0000\u02d5\u02d6\u0005"+ + "`\u0000\u0000\u02d6\u02d8\u0005`\u0000\u0000\u02d7\u02d4\u0001\u0000\u0000"+ + "\u0000\u02d7\u02d5\u0001\u0000\u0000\u0000\u02d8Y\u0001\u0000\u0000\u0000"+ + "\u02d9\u02da\u0005_\u0000\u0000\u02da[\u0001\u0000\u0000\u0000\u02db\u02df"+ + "\u0003L\u001e\u0000\u02dc\u02df\u0003J\u001d\u0000\u02dd\u02df\u0003Z"+ + "%\u0000\u02de\u02db\u0001\u0000\u0000\u0000\u02de\u02dc\u0001\u0000\u0000"+ + "\u0000\u02de\u02dd\u0001\u0000\u0000\u0000\u02df]\u0001\u0000\u0000\u0000"+ + "\u02e0\u02e5\u0005\"\u0000\u0000\u02e1\u02e4\u0003N\u001f\u0000\u02e2"+ + "\u02e4\u0003P \u0000\u02e3\u02e1\u0001\u0000\u0000\u0000\u02e3\u02e2\u0001"+ + "\u0000\u0000\u0000\u02e4\u02e7\u0001\u0000\u0000\u0000\u02e5\u02e3\u0001"+ + "\u0000\u0000\u0000\u02e5\u02e6\u0001\u0000\u0000\u0000\u02e6\u02e8\u0001"+ + "\u0000\u0000\u0000\u02e7\u02e5\u0001\u0000\u0000\u0000\u02e8\u02fe\u0005"+ + "\"\u0000\u0000\u02e9\u02ea\u0005\"\u0000\u0000\u02ea\u02eb\u0005\"\u0000"+ + "\u0000\u02eb\u02ec\u0005\"\u0000\u0000\u02ec\u02f0\u0001\u0000\u0000\u0000"+ + "\u02ed\u02ef\b\u0017\u0000\u0000\u02ee\u02ed\u0001\u0000\u0000\u0000\u02ef"+ + "\u02f2\u0001\u0000\u0000\u0000\u02f0\u02f1\u0001\u0000\u0000\u0000\u02f0"+ + "\u02ee\u0001\u0000\u0000\u0000\u02f1\u02f3\u0001\u0000\u0000\u0000\u02f2"+ + "\u02f0\u0001\u0000\u0000\u0000\u02f3\u02f4\u0005\"\u0000\u0000\u02f4\u02f5"+ + "\u0005\"\u0000\u0000\u02f5\u02f6\u0005\"\u0000\u0000\u02f6\u02f8\u0001"+ + "\u0000\u0000\u0000\u02f7\u02f9\u0005\"\u0000\u0000\u02f8\u02f7\u0001\u0000"+ + "\u0000\u0000\u02f8\u02f9\u0001\u0000\u0000\u0000\u02f9\u02fb\u0001\u0000"+ + "\u0000\u0000\u02fa\u02fc\u0005\"\u0000\u0000\u02fb\u02fa\u0001\u0000\u0000"+ + "\u0000\u02fb\u02fc\u0001\u0000\u0000\u0000\u02fc\u02fe\u0001\u0000\u0000"+ + "\u0000\u02fd\u02e0\u0001\u0000\u0000\u0000\u02fd\u02e9\u0001\u0000\u0000"+ + "\u0000\u02fe_\u0001\u0000\u0000\u0000\u02ff\u0301\u0003J\u001d\u0000\u0300"+ + "\u02ff\u0001\u0000\u0000\u0000\u0301\u0302\u0001\u0000\u0000\u0000\u0302"+ + "\u0300\u0001\u0000\u0000\u0000\u0302\u0303\u0001\u0000\u0000\u0000\u0303"+ + "a\u0001\u0000\u0000\u0000\u0304\u0306\u0003J\u001d\u0000\u0305\u0304\u0001"+ + "\u0000\u0000\u0000\u0306\u0307\u0001\u0000\u0000\u0000\u0307\u0305\u0001"+ + "\u0000\u0000\u0000\u0307\u0308\u0001\u0000\u0000\u0000\u0308\u0309\u0001"+ + "\u0000\u0000\u0000\u0309\u030d\u0003t2\u0000\u030a\u030c\u0003J\u001d"+ + "\u0000\u030b\u030a\u0001\u0000\u0000\u0000\u030c\u030f\u0001\u0000\u0000"+ + "\u0000\u030d\u030b\u0001\u0000\u0000\u0000\u030d\u030e\u0001\u0000\u0000"+ + "\u0000\u030e\u032f\u0001\u0000\u0000\u0000\u030f\u030d\u0001\u0000\u0000"+ + 
"\u0000\u0310\u0312\u0003t2\u0000\u0311\u0313\u0003J\u001d\u0000\u0312"+ + "\u0311\u0001\u0000\u0000\u0000\u0313\u0314\u0001\u0000\u0000\u0000\u0314"+ + "\u0312\u0001\u0000\u0000\u0000\u0314\u0315\u0001\u0000\u0000\u0000\u0315"+ + "\u032f\u0001\u0000\u0000\u0000\u0316\u0318\u0003J\u001d\u0000\u0317\u0316"+ + "\u0001\u0000\u0000\u0000\u0318\u0319\u0001\u0000\u0000\u0000\u0319\u0317"+ + "\u0001\u0000\u0000\u0000\u0319\u031a\u0001\u0000\u0000\u0000\u031a\u0322"+ + "\u0001\u0000\u0000\u0000\u031b\u031f\u0003t2\u0000\u031c\u031e\u0003J"+ + "\u001d\u0000\u031d\u031c\u0001\u0000\u0000\u0000\u031e\u0321\u0001\u0000"+ + "\u0000\u0000\u031f\u031d\u0001\u0000\u0000\u0000\u031f\u0320\u0001\u0000"+ + "\u0000\u0000\u0320\u0323\u0001\u0000\u0000\u0000\u0321\u031f\u0001\u0000"+ + "\u0000\u0000\u0322\u031b\u0001\u0000\u0000\u0000\u0322\u0323\u0001\u0000"+ + "\u0000\u0000\u0323\u0324\u0001\u0000\u0000\u0000\u0324\u0325\u0003R!\u0000"+ + "\u0325\u032f\u0001\u0000\u0000\u0000\u0326\u0328\u0003t2\u0000\u0327\u0329"+ + "\u0003J\u001d\u0000\u0328\u0327\u0001\u0000\u0000\u0000\u0329\u032a\u0001"+ + "\u0000\u0000\u0000\u032a\u0328\u0001\u0000\u0000\u0000\u032a\u032b\u0001"+ + "\u0000\u0000\u0000\u032b\u032c\u0001\u0000\u0000\u0000\u032c\u032d\u0003"+ + "R!\u0000\u032d\u032f\u0001\u0000\u0000\u0000\u032e\u0305\u0001\u0000\u0000"+ + "\u0000\u032e\u0310\u0001\u0000\u0000\u0000\u032e\u0317\u0001\u0000\u0000"+ + "\u0000\u032e\u0326\u0001\u0000\u0000\u0000\u032fc\u0001\u0000\u0000\u0000"+ + "\u0330\u0331\u0007\u001f\u0000\u0000\u0331\u0332\u0007 \u0000\u0000\u0332"+ + "e\u0001\u0000\u0000\u0000\u0333\u0334\u0007\f\u0000\u0000\u0334\u0335"+ + "\u0007\t\u0000\u0000\u0335\u0336\u0007\u0000\u0000\u0000\u0336g\u0001"+ + "\u0000\u0000\u0000\u0337\u0338\u0007\f\u0000\u0000\u0338\u0339\u0007\u0002"+ + "\u0000\u0000\u0339\u033a\u0007\u0004\u0000\u0000\u033ai\u0001\u0000\u0000"+ + "\u0000\u033b\u033c\u0005=\u0000\u0000\u033ck\u0001\u0000\u0000\u0000\u033d"+ + "\u033e\u0005:\u0000\u0000\u033e\u033f\u0005:\u0000\u0000\u033fm\u0001"+ + "\u0000\u0000\u0000\u0340\u0341\u0005:\u0000\u0000\u0341o\u0001\u0000\u0000"+ + "\u0000\u0342\u0343\u0005,\u0000\u0000\u0343q\u0001\u0000\u0000\u0000\u0344"+ + "\u0345\u0007\u0000\u0000\u0000\u0345\u0346\u0007\u0003\u0000\u0000\u0346"+ + "\u0347\u0007\u0002\u0000\u0000\u0347\u0348\u0007\u0004\u0000\u0000\u0348"+ + "s\u0001\u0000\u0000\u0000\u0349\u034a\u0005.\u0000\u0000\u034au\u0001"+ + "\u0000\u0000\u0000\u034b\u034c\u0007\u000f\u0000\u0000\u034c\u034d\u0007"+ + "\f\u0000\u0000\u034d\u034e\u0007\r\u0000\u0000\u034e\u034f\u0007\u0002"+ + "\u0000\u0000\u034f\u0350\u0007\u0003\u0000\u0000\u0350w\u0001\u0000\u0000"+ + "\u0000\u0351\u0352\u0007\u000f\u0000\u0000\u0352\u0353\u0007\u0001\u0000"+ + "\u0000\u0353\u0354\u0007\u0006\u0000\u0000\u0354\u0355\u0007\u0002\u0000"+ + "\u0000\u0355\u0356\u0007\u0005\u0000\u0000\u0356y\u0001\u0000\u0000\u0000"+ + "\u0357\u0358\u0007\u0001\u0000\u0000\u0358\u0359\u0007\t\u0000\u0000\u0359"+ + "{\u0001\u0000\u0000\u0000\u035a\u035b\u0007\u0001\u0000\u0000\u035b\u035c"+ + "\u0007\u0002\u0000\u0000\u035c}\u0001\u0000\u0000\u0000\u035d\u035e\u0007"+ + "\r\u0000\u0000\u035e\u035f\u0007\f\u0000\u0000\u035f\u0360\u0007\u0002"+ + "\u0000\u0000\u0360\u0361\u0007\u0005\u0000\u0000\u0361\u007f\u0001\u0000"+ + "\u0000\u0000\u0362\u0363\u0007\r\u0000\u0000\u0363\u0364\u0007\u0001\u0000"+ + "\u0000\u0364\u0365\u0007\u0012\u0000\u0000\u0365\u0366\u0007\u0003\u0000"+ + "\u0000\u0366\u0081\u0001\u0000\u0000\u0000\u0367\u0368\u0005(\u0000\u0000"+ + 
"\u0368\u0083\u0001\u0000\u0000\u0000\u0369\u036a\u0007\t\u0000\u0000\u036a"+ + "\u036b\u0007\u0007\u0000\u0000\u036b\u036c\u0007\u0005\u0000\u0000\u036c"+ + "\u0085\u0001\u0000\u0000\u0000\u036d\u036e\u0007\t\u0000\u0000\u036e\u036f"+ + "\u0007\u0014\u0000\u0000\u036f\u0370\u0007\r\u0000\u0000\u0370\u0371\u0007"+ + "\r\u0000\u0000\u0371\u0087\u0001\u0000\u0000\u0000\u0372\u0373\u0007\t"+ + "\u0000\u0000\u0373\u0374\u0007\u0014\u0000\u0000\u0374\u0375\u0007\r\u0000"+ + "\u0000\u0375\u0376\u0007\r\u0000\u0000\u0376\u0377\u0007\u0002\u0000\u0000"+ + "\u0377\u0089\u0001\u0000\u0000\u0000\u0378\u0379\u0007\u0007\u0000\u0000"+ + "\u0379\u037a\u0007\u0006\u0000\u0000\u037a\u008b\u0001\u0000\u0000\u0000"+ + "\u037b\u037c\u0005?\u0000\u0000\u037c\u008d\u0001\u0000\u0000\u0000\u037d"+ + "\u037e\u0007\u0006\u0000\u0000\u037e\u037f\u0007\r\u0000\u0000\u037f\u0380"+ + "\u0007\u0001\u0000\u0000\u0380\u0381\u0007\u0012\u0000\u0000\u0381\u0382"+ + "\u0007\u0003\u0000\u0000\u0382\u008f\u0001\u0000\u0000\u0000\u0383\u0384"+ + "\u0005)\u0000\u0000\u0384\u0091\u0001\u0000\u0000\u0000\u0385\u0386\u0007"+ + "\u0005\u0000\u0000\u0386\u0387\u0007\u0006\u0000\u0000\u0387\u0388\u0007"+ + "\u0014\u0000\u0000\u0388\u0389\u0007\u0003\u0000\u0000\u0389\u0093\u0001"+ + "\u0000\u0000\u0000\u038a\u038b\u0005=\u0000\u0000\u038b\u038c\u0005=\u0000"+ + "\u0000\u038c\u0095\u0001\u0000\u0000\u0000\u038d\u038e\u0005=\u0000\u0000"+ + "\u038e\u038f\u0005~\u0000\u0000\u038f\u0097\u0001\u0000\u0000\u0000\u0390"+ + "\u0391\u0005!\u0000\u0000\u0391\u0392\u0005=\u0000\u0000\u0392\u0099\u0001"+ + "\u0000\u0000\u0000\u0393\u0394\u0005<\u0000\u0000\u0394\u009b\u0001\u0000"+ + "\u0000\u0000\u0395\u0396\u0005<\u0000\u0000\u0396\u0397\u0005=\u0000\u0000"+ + "\u0397\u009d\u0001\u0000\u0000\u0000\u0398\u0399\u0005>\u0000\u0000\u0399"+ + "\u009f\u0001\u0000\u0000\u0000\u039a\u039b\u0005>\u0000\u0000\u039b\u039c"+ + "\u0005=\u0000\u0000\u039c\u00a1\u0001\u0000\u0000\u0000\u039d\u039e\u0005"+ + "+\u0000\u0000\u039e\u00a3\u0001\u0000\u0000\u0000\u039f\u03a0\u0005-\u0000"+ + "\u0000\u03a0\u00a5\u0001\u0000\u0000\u0000\u03a1\u03a2\u0005*\u0000\u0000"+ + "\u03a2\u00a7\u0001\u0000\u0000\u0000\u03a3\u03a4\u0005/\u0000\u0000\u03a4"+ + "\u00a9\u0001\u0000\u0000\u0000\u03a5\u03a6\u0005%\u0000\u0000\u03a6\u00ab"+ + "\u0001\u0000\u0000\u0000\u03a7\u03a8\u0003.\u000f\u0000\u03a8\u03a9\u0001"+ + "\u0000\u0000\u0000\u03a9\u03aa\u0006N\r\u0000\u03aa\u00ad\u0001\u0000"+ + "\u0000\u0000\u03ab\u03ae\u0003\u008c>\u0000\u03ac\u03af\u0003L\u001e\u0000"+ + "\u03ad\u03af\u0003Z%\u0000\u03ae\u03ac\u0001\u0000\u0000\u0000\u03ae\u03ad"+ + "\u0001\u0000\u0000\u0000\u03af\u03b3\u0001\u0000\u0000\u0000\u03b0\u03b2"+ + "\u0003\\&\u0000\u03b1\u03b0\u0001\u0000\u0000\u0000\u03b2\u03b5\u0001"+ + "\u0000\u0000\u0000\u03b3\u03b1\u0001\u0000\u0000\u0000\u03b3\u03b4\u0001"+ + "\u0000\u0000\u0000\u03b4\u03bd\u0001\u0000\u0000\u0000\u03b5\u03b3\u0001"+ + "\u0000\u0000\u0000\u03b6\u03b8\u0003\u008c>\u0000\u03b7\u03b9\u0003J\u001d"+ + "\u0000\u03b8\u03b7\u0001\u0000\u0000\u0000\u03b9\u03ba\u0001\u0000\u0000"+ + "\u0000\u03ba\u03b8\u0001\u0000\u0000\u0000\u03ba\u03bb\u0001\u0000\u0000"+ + "\u0000\u03bb\u03bd\u0001\u0000\u0000\u0000\u03bc\u03ab\u0001\u0000\u0000"+ + "\u0000\u03bc\u03b6\u0001\u0000\u0000\u0000\u03bd\u00af\u0001\u0000\u0000"+ + "\u0000\u03be\u03bf\u0005[\u0000\u0000\u03bf\u03c0\u0001\u0000\u0000\u0000"+ + "\u03c0\u03c1\u0006P\u0000\u0000\u03c1\u03c2\u0006P\u0000\u0000\u03c2\u00b1"+ + "\u0001\u0000\u0000\u0000\u03c3\u03c4\u0005]\u0000\u0000\u03c4\u03c5\u0001"+ + 
"\u0000\u0000\u0000\u03c5\u03c6\u0006Q\f\u0000\u03c6\u03c7\u0006Q\f\u0000"+ + "\u03c7\u00b3\u0001\u0000\u0000\u0000\u03c8\u03cc\u0003L\u001e\u0000\u03c9"+ + "\u03cb\u0003\\&\u0000\u03ca\u03c9\u0001\u0000\u0000\u0000\u03cb\u03ce"+ + "\u0001\u0000\u0000\u0000\u03cc\u03ca\u0001\u0000\u0000\u0000\u03cc\u03cd"+ + "\u0001\u0000\u0000\u0000\u03cd\u03d9\u0001\u0000\u0000\u0000\u03ce\u03cc"+ + "\u0001\u0000\u0000\u0000\u03cf\u03d2\u0003Z%\u0000\u03d0\u03d2\u0003T"+ + "\"\u0000\u03d1\u03cf\u0001\u0000\u0000\u0000\u03d1\u03d0\u0001\u0000\u0000"+ + "\u0000\u03d2\u03d4\u0001\u0000\u0000\u0000\u03d3\u03d5\u0003\\&\u0000"+ + "\u03d4\u03d3\u0001\u0000\u0000\u0000\u03d5\u03d6\u0001\u0000\u0000\u0000"+ + "\u03d6\u03d4\u0001\u0000\u0000\u0000\u03d6\u03d7\u0001\u0000\u0000\u0000"+ + "\u03d7\u03d9\u0001\u0000\u0000\u0000\u03d8\u03c8\u0001\u0000\u0000\u0000"+ + "\u03d8\u03d1\u0001\u0000\u0000\u0000\u03d9\u00b5\u0001\u0000\u0000\u0000"+ + "\u03da\u03dc\u0003V#\u0000\u03db\u03dd\u0003X$\u0000\u03dc\u03db\u0001"+ + "\u0000\u0000\u0000\u03dd\u03de\u0001\u0000\u0000\u0000\u03de\u03dc\u0001"+ + "\u0000\u0000\u0000\u03de\u03df\u0001\u0000\u0000\u0000\u03df\u03e0\u0001"+ + "\u0000\u0000\u0000\u03e0\u03e1\u0003V#\u0000\u03e1\u00b7\u0001\u0000\u0000"+ + "\u0000\u03e2\u03e3\u0003\u00b6S\u0000\u03e3\u00b9\u0001\u0000\u0000\u0000"+ + "\u03e4\u03e5\u0003B\u0019\u0000\u03e5\u03e6\u0001\u0000\u0000\u0000\u03e6"+ + "\u03e7\u0006U\u000b\u0000\u03e7\u00bb\u0001\u0000\u0000\u0000\u03e8\u03e9"+ + "\u0003D\u001a\u0000\u03e9\u03ea\u0001\u0000\u0000\u0000\u03ea\u03eb\u0006"+ + "V\u000b\u0000\u03eb\u00bd\u0001\u0000\u0000\u0000\u03ec\u03ed\u0003F\u001b"+ + "\u0000\u03ed\u03ee\u0001\u0000\u0000\u0000\u03ee\u03ef\u0006W\u000b\u0000"+ + "\u03ef\u00bf\u0001\u0000\u0000\u0000\u03f0\u03f1\u0003\u00b0P\u0000\u03f1"+ + "\u03f2\u0001\u0000\u0000\u0000\u03f2\u03f3\u0006X\u000e\u0000\u03f3\u03f4"+ + "\u0006X\u000f\u0000\u03f4\u00c1\u0001\u0000\u0000\u0000\u03f5\u03f6\u0003"+ + "H\u001c\u0000\u03f6\u03f7\u0001\u0000\u0000\u0000\u03f7\u03f8\u0006Y\u0010"+ + "\u0000\u03f8\u03f9\u0006Y\f\u0000\u03f9\u00c3\u0001\u0000\u0000\u0000"+ + "\u03fa\u03fb\u0003F\u001b\u0000\u03fb\u03fc\u0001\u0000\u0000\u0000\u03fc"+ + "\u03fd\u0006Z\u000b\u0000\u03fd\u00c5\u0001\u0000\u0000\u0000\u03fe\u03ff"+ + "\u0003B\u0019\u0000\u03ff\u0400\u0001\u0000\u0000\u0000\u0400\u0401\u0006"+ + "[\u000b\u0000\u0401\u00c7\u0001\u0000\u0000\u0000\u0402\u0403\u0003D\u001a"+ + "\u0000\u0403\u0404\u0001\u0000\u0000\u0000\u0404\u0405\u0006\\\u000b\u0000"+ + "\u0405\u00c9\u0001\u0000\u0000\u0000\u0406\u0407\u0003H\u001c\u0000\u0407"+ + "\u0408\u0001\u0000\u0000\u0000\u0408\u0409\u0006]\u0010\u0000\u0409\u040a"+ + "\u0006]\f\u0000\u040a\u00cb\u0001\u0000\u0000\u0000\u040b\u040c\u0003"+ + "\u00b0P\u0000\u040c\u040d\u0001\u0000\u0000\u0000\u040d\u040e\u0006^\u000e"+ + "\u0000\u040e\u00cd\u0001\u0000\u0000\u0000\u040f\u0410\u0003\u00b2Q\u0000"+ + "\u0410\u0411\u0001\u0000\u0000\u0000\u0411\u0412\u0006_\u0011\u0000\u0412"+ + "\u00cf\u0001\u0000\u0000\u0000\u0413\u0414\u0003n/\u0000\u0414\u0415\u0001"+ + "\u0000\u0000\u0000\u0415\u0416\u0006`\u0012\u0000\u0416\u00d1\u0001\u0000"+ + "\u0000\u0000\u0417\u0418\u0003p0\u0000\u0418\u0419\u0001\u0000\u0000\u0000"+ + "\u0419\u041a\u0006a\u0013\u0000\u041a\u00d3\u0001\u0000\u0000\u0000\u041b"+ + "\u041c\u0003j-\u0000\u041c\u041d\u0001\u0000\u0000\u0000\u041d\u041e\u0006"+ + "b\u0014\u0000\u041e\u00d5\u0001\u0000\u0000\u0000\u041f\u0420\u0007\u0010"+ + "\u0000\u0000\u0420\u0421\u0007\u0003\u0000\u0000\u0421\u0422\u0007\u0005"+ + 
"\u0000\u0000\u0422\u0423\u0007\f\u0000\u0000\u0423\u0424\u0007\u0000\u0000"+ + "\u0000\u0424\u0425\u0007\f\u0000\u0000\u0425\u0426\u0007\u0005\u0000\u0000"+ + "\u0426\u0427\u0007\f\u0000\u0000\u0427\u00d7\u0001\u0000\u0000\u0000\u0428"+ + "\u042c\b!\u0000\u0000\u0429\u042a\u0005/\u0000\u0000\u042a\u042c\b\"\u0000"+ + "\u0000\u042b\u0428\u0001\u0000\u0000\u0000\u042b\u0429\u0001\u0000\u0000"+ + "\u0000\u042c\u00d9\u0001\u0000\u0000\u0000\u042d\u042f\u0003\u00d8d\u0000"+ + "\u042e\u042d\u0001\u0000\u0000\u0000\u042f\u0430\u0001\u0000\u0000\u0000"+ + "\u0430\u042e\u0001\u0000\u0000\u0000\u0430\u0431\u0001\u0000\u0000\u0000"+ + "\u0431\u00db\u0001\u0000\u0000\u0000\u0432\u0433\u0003\u00dae\u0000\u0433"+ + "\u0434\u0001\u0000\u0000\u0000\u0434\u0435\u0006f\u0015\u0000\u0435\u00dd"+ + "\u0001\u0000\u0000\u0000\u0436\u0437\u0003^\'\u0000\u0437\u0438\u0001"+ + "\u0000\u0000\u0000\u0438\u0439\u0006g\u0016\u0000\u0439\u00df\u0001\u0000"+ + "\u0000\u0000\u043a\u043b\u0003B\u0019\u0000\u043b\u043c\u0001\u0000\u0000"+ + "\u0000\u043c\u043d\u0006h\u000b\u0000\u043d\u00e1\u0001\u0000\u0000\u0000"+ + "\u043e\u043f\u0003D\u001a\u0000\u043f\u0440\u0001\u0000\u0000\u0000\u0440"+ + "\u0441\u0006i\u000b\u0000\u0441\u00e3\u0001\u0000\u0000\u0000\u0442\u0443"+ + "\u0003F\u001b\u0000\u0443\u0444\u0001\u0000\u0000\u0000\u0444\u0445\u0006"+ + "j\u000b\u0000\u0445\u00e5\u0001\u0000\u0000\u0000\u0446\u0447\u0003H\u001c"+ + "\u0000\u0447\u0448\u0001\u0000\u0000\u0000\u0448\u0449\u0006k\u0010\u0000"+ + "\u0449\u044a\u0006k\f\u0000\u044a\u00e7\u0001\u0000\u0000\u0000\u044b"+ + "\u044c\u0003t2\u0000\u044c\u044d\u0001\u0000\u0000\u0000\u044d\u044e\u0006"+ + "l\u0017\u0000\u044e\u00e9\u0001\u0000\u0000\u0000\u044f\u0450\u0003p0"+ + "\u0000\u0450\u0451\u0001\u0000\u0000\u0000\u0451\u0452\u0006m\u0013\u0000"+ + "\u0452\u00eb\u0001\u0000\u0000\u0000\u0453\u0454\u0004n\b\u0000\u0454"+ + "\u0455\u0003\u008c>\u0000\u0455\u0456\u0001\u0000\u0000\u0000\u0456\u0457"+ + "\u0006n\u0018\u0000\u0457\u00ed\u0001\u0000\u0000\u0000\u0458\u0459\u0004"+ + "o\t\u0000\u0459\u045a\u0003\u00aeO\u0000\u045a\u045b\u0001\u0000\u0000"+ + "\u0000\u045b\u045c\u0006o\u0019\u0000\u045c\u00ef\u0001\u0000\u0000\u0000"+ + "\u045d\u0462\u0003L\u001e\u0000\u045e\u0462\u0003J\u001d\u0000\u045f\u0462"+ + "\u0003Z%\u0000\u0460\u0462\u0003\u00a6K\u0000\u0461\u045d\u0001\u0000"+ + "\u0000\u0000\u0461\u045e\u0001\u0000\u0000\u0000\u0461\u045f\u0001\u0000"+ + "\u0000\u0000\u0461\u0460\u0001\u0000\u0000\u0000\u0462\u00f1\u0001\u0000"+ + "\u0000\u0000\u0463\u0466\u0003L\u001e\u0000\u0464\u0466\u0003\u00a6K\u0000"+ + "\u0465\u0463\u0001\u0000\u0000\u0000\u0465\u0464\u0001\u0000\u0000\u0000"+ + "\u0466\u046a\u0001\u0000\u0000\u0000\u0467\u0469\u0003\u00f0p\u0000\u0468"+ + "\u0467\u0001\u0000\u0000\u0000\u0469\u046c\u0001\u0000\u0000\u0000\u046a"+ + "\u0468\u0001\u0000\u0000\u0000\u046a\u046b\u0001\u0000\u0000\u0000\u046b"+ + "\u0477\u0001\u0000\u0000\u0000\u046c\u046a\u0001\u0000\u0000\u0000\u046d"+ + "\u0470\u0003Z%\u0000\u046e\u0470\u0003T\"\u0000\u046f\u046d\u0001\u0000"+ + "\u0000\u0000\u046f\u046e\u0001\u0000\u0000\u0000\u0470\u0472\u0001\u0000"+ + "\u0000\u0000\u0471\u0473\u0003\u00f0p\u0000\u0472\u0471\u0001\u0000\u0000"+ + "\u0000\u0473\u0474\u0001\u0000\u0000\u0000\u0474\u0472\u0001\u0000\u0000"+ + "\u0000\u0474\u0475\u0001\u0000\u0000\u0000\u0475\u0477\u0001\u0000\u0000"+ + "\u0000\u0476\u0465\u0001\u0000\u0000\u0000\u0476\u046f\u0001\u0000\u0000"+ + "\u0000\u0477\u00f3\u0001\u0000\u0000\u0000\u0478\u047b\u0003\u00f2q\u0000"+ + 
"\u0479\u047b\u0003\u00b6S\u0000\u047a\u0478\u0001\u0000\u0000\u0000\u047a"+ + "\u0479\u0001\u0000\u0000\u0000\u047b\u047c\u0001\u0000\u0000\u0000\u047c"+ + "\u047a\u0001\u0000\u0000\u0000\u047c\u047d\u0001\u0000\u0000\u0000\u047d"+ + "\u00f5\u0001\u0000\u0000\u0000\u047e\u047f\u0003B\u0019\u0000\u047f\u0480"+ + "\u0001\u0000\u0000\u0000\u0480\u0481\u0006s\u000b\u0000\u0481\u00f7\u0001"+ + "\u0000\u0000\u0000\u0482\u0483\u0003D\u001a\u0000\u0483\u0484\u0001\u0000"+ + "\u0000\u0000\u0484\u0485\u0006t\u000b\u0000\u0485\u00f9\u0001\u0000\u0000"+ + "\u0000\u0486\u0487\u0003F\u001b\u0000\u0487\u0488\u0001\u0000\u0000\u0000"+ + "\u0488\u0489\u0006u\u000b\u0000\u0489\u00fb\u0001\u0000\u0000\u0000\u048a"+ + "\u048b\u0003H\u001c\u0000\u048b\u048c\u0001\u0000\u0000\u0000\u048c\u048d"+ + "\u0006v\u0010\u0000\u048d\u048e\u0006v\f\u0000\u048e\u00fd\u0001\u0000"+ + "\u0000\u0000\u048f\u0490\u0003j-\u0000\u0490\u0491\u0001\u0000\u0000\u0000"+ + "\u0491\u0492\u0006w\u0014\u0000\u0492\u00ff\u0001\u0000\u0000\u0000\u0493"+ + "\u0494\u0003p0\u0000\u0494\u0495\u0001\u0000\u0000\u0000\u0495\u0496\u0006"+ + "x\u0013\u0000\u0496\u0101\u0001\u0000\u0000\u0000\u0497\u0498\u0003t2"+ + "\u0000\u0498\u0499\u0001\u0000\u0000\u0000\u0499\u049a\u0006y\u0017\u0000"+ + "\u049a\u0103\u0001\u0000\u0000\u0000\u049b\u049c\u0004z\n\u0000\u049c"+ + "\u049d\u0003\u008c>\u0000\u049d\u049e\u0001\u0000\u0000\u0000\u049e\u049f"+ + "\u0006z\u0018\u0000\u049f\u0105\u0001\u0000\u0000\u0000\u04a0\u04a1\u0004"+ + "{\u000b\u0000\u04a1\u04a2\u0003\u00aeO\u0000\u04a2\u04a3\u0001\u0000\u0000"+ + "\u0000\u04a3\u04a4\u0006{\u0019\u0000\u04a4\u0107\u0001\u0000\u0000\u0000"+ + "\u04a5\u04a6\u0007\f\u0000\u0000\u04a6\u04a7\u0007\u0002\u0000\u0000\u04a7"+ + "\u0109\u0001\u0000\u0000\u0000\u04a8\u04a9\u0003\u00f4r\u0000\u04a9\u04aa"+ + "\u0001\u0000\u0000\u0000\u04aa\u04ab\u0006}\u001a\u0000\u04ab\u010b\u0001"+ + "\u0000\u0000\u0000\u04ac\u04ad\u0003B\u0019\u0000\u04ad\u04ae\u0001\u0000"+ + "\u0000\u0000\u04ae\u04af\u0006~\u000b\u0000\u04af\u010d\u0001\u0000\u0000"+ + "\u0000\u04b0\u04b1\u0003D\u001a\u0000\u04b1\u04b2\u0001\u0000\u0000\u0000"+ + "\u04b2\u04b3\u0006\u007f\u000b\u0000\u04b3\u010f\u0001\u0000\u0000\u0000"+ + "\u04b4\u04b5\u0003F\u001b\u0000\u04b5\u04b6\u0001\u0000\u0000\u0000\u04b6"+ + "\u04b7\u0006\u0080\u000b\u0000\u04b7\u0111\u0001\u0000\u0000\u0000\u04b8"+ + "\u04b9\u0003H\u001c\u0000\u04b9\u04ba\u0001\u0000\u0000\u0000\u04ba\u04bb"+ + "\u0006\u0081\u0010\u0000\u04bb\u04bc\u0006\u0081\f\u0000\u04bc\u0113\u0001"+ + "\u0000\u0000\u0000\u04bd\u04be\u0003\u00b0P\u0000\u04be\u04bf\u0001\u0000"+ + "\u0000\u0000\u04bf\u04c0\u0006\u0082\u000e\u0000\u04c0\u04c1\u0006\u0082"+ + "\u001b\u0000\u04c1\u0115\u0001\u0000\u0000\u0000\u04c2\u04c3\u0007\u0007"+ + "\u0000\u0000\u04c3\u04c4\u0007\t\u0000\u0000\u04c4\u04c5\u0001\u0000\u0000"+ + "\u0000\u04c5\u04c6\u0006\u0083\u001c\u0000\u04c6\u0117\u0001\u0000\u0000"+ + "\u0000\u04c7\u04c8\u0007\u0013\u0000\u0000\u04c8\u04c9\u0007\u0001\u0000"+ + "\u0000\u04c9\u04ca\u0007\u0005\u0000\u0000\u04ca\u04cb\u0007\n\u0000\u0000"+ + "\u04cb\u04cc\u0001\u0000\u0000\u0000\u04cc\u04cd\u0006\u0084\u001c\u0000"+ + "\u04cd\u0119\u0001\u0000\u0000\u0000\u04ce\u04cf\b#\u0000\u0000\u04cf"+ + "\u011b\u0001\u0000\u0000\u0000\u04d0\u04d2\u0003\u011a\u0085\u0000\u04d1"+ + "\u04d0\u0001\u0000\u0000\u0000\u04d2\u04d3\u0001\u0000\u0000\u0000\u04d3"+ + "\u04d1\u0001\u0000\u0000\u0000\u04d3\u04d4\u0001\u0000\u0000\u0000\u04d4"+ + "\u04d5\u0001\u0000\u0000\u0000\u04d5\u04d6\u0003n/\u0000\u04d6\u04d8\u0001"+ + 
"\u0000\u0000\u0000\u04d7\u04d1\u0001\u0000\u0000\u0000\u04d7\u04d8\u0001"+ + "\u0000\u0000\u0000\u04d8\u04da\u0001\u0000\u0000\u0000\u04d9\u04db\u0003"+ + "\u011a\u0085\u0000\u04da\u04d9\u0001\u0000\u0000\u0000\u04db\u04dc\u0001"+ + "\u0000\u0000\u0000\u04dc\u04da\u0001\u0000\u0000\u0000\u04dc\u04dd\u0001"+ + "\u0000\u0000\u0000\u04dd\u011d\u0001\u0000\u0000\u0000\u04de\u04df\u0003"+ + "\u011c\u0086\u0000\u04df\u04e0\u0001\u0000\u0000\u0000\u04e0\u04e1\u0006"+ + "\u0087\u001d\u0000\u04e1\u011f\u0001\u0000\u0000\u0000\u04e2\u04e3\u0003"+ + "B\u0019\u0000\u04e3\u04e4\u0001\u0000\u0000\u0000\u04e4\u04e5\u0006\u0088"+ + "\u000b\u0000\u04e5\u0121\u0001\u0000\u0000\u0000\u04e6\u04e7\u0003D\u001a"+ + "\u0000\u04e7\u04e8\u0001\u0000\u0000\u0000\u04e8\u04e9\u0006\u0089\u000b"+ + "\u0000\u04e9\u0123\u0001\u0000\u0000\u0000\u04ea\u04eb\u0003F\u001b\u0000"+ + "\u04eb\u04ec\u0001\u0000\u0000\u0000\u04ec\u04ed\u0006\u008a\u000b\u0000"+ + "\u04ed\u0125\u0001\u0000\u0000\u0000\u04ee\u04ef\u0003H\u001c\u0000\u04ef"+ + "\u04f0\u0001\u0000\u0000\u0000\u04f0\u04f1\u0006\u008b\u0010\u0000\u04f1"+ + "\u04f2\u0006\u008b\f\u0000\u04f2\u04f3\u0006\u008b\f\u0000\u04f3\u0127"+ + "\u0001\u0000\u0000\u0000\u04f4\u04f5\u0003j-\u0000\u04f5\u04f6\u0001\u0000"+ + "\u0000\u0000\u04f6\u04f7\u0006\u008c\u0014\u0000\u04f7\u0129\u0001\u0000"+ + "\u0000\u0000\u04f8\u04f9\u0003p0\u0000\u04f9\u04fa\u0001\u0000\u0000\u0000"+ + "\u04fa\u04fb\u0006\u008d\u0013\u0000\u04fb\u012b\u0001\u0000\u0000\u0000"+ + "\u04fc\u04fd\u0003t2\u0000\u04fd\u04fe\u0001\u0000\u0000\u0000\u04fe\u04ff"+ + "\u0006\u008e\u0017\u0000\u04ff\u012d\u0001\u0000\u0000\u0000\u0500\u0501"+ + "\u0003\u0118\u0084\u0000\u0501\u0502\u0001\u0000\u0000\u0000\u0502\u0503"+ + "\u0006\u008f\u001e\u0000\u0503\u012f\u0001\u0000\u0000\u0000\u0504\u0505"+ + "\u0003\u00f4r\u0000\u0505\u0506\u0001\u0000\u0000\u0000\u0506\u0507\u0006"+ + "\u0090\u001a\u0000\u0507\u0131\u0001\u0000\u0000\u0000\u0508\u0509\u0003"+ + "\u00b8T\u0000\u0509\u050a\u0001\u0000\u0000\u0000\u050a\u050b\u0006\u0091"+ + "\u001f\u0000\u050b\u0133\u0001\u0000\u0000\u0000\u050c\u050d\u0004\u0092"+ + "\f\u0000\u050d\u050e\u0003\u008c>\u0000\u050e\u050f\u0001\u0000\u0000"+ + "\u0000\u050f\u0510\u0006\u0092\u0018\u0000\u0510\u0135\u0001\u0000\u0000"+ + "\u0000\u0511\u0512\u0004\u0093\r\u0000\u0512\u0513\u0003\u00aeO\u0000"+ + "\u0513\u0514\u0001\u0000\u0000\u0000\u0514\u0515\u0006\u0093\u0019\u0000"+ + "\u0515\u0137\u0001\u0000\u0000\u0000\u0516\u0517\u0003B\u0019\u0000\u0517"+ + "\u0518\u0001\u0000\u0000\u0000\u0518\u0519\u0006\u0094\u000b\u0000\u0519"+ + "\u0139\u0001\u0000\u0000\u0000\u051a\u051b\u0003D\u001a\u0000\u051b\u051c"+ + "\u0001\u0000\u0000\u0000\u051c\u051d\u0006\u0095\u000b\u0000\u051d\u013b"+ + "\u0001\u0000\u0000\u0000\u051e\u051f\u0003F\u001b\u0000\u051f\u0520\u0001"+ + "\u0000\u0000\u0000\u0520\u0521\u0006\u0096\u000b\u0000\u0521\u013d\u0001"+ + "\u0000\u0000\u0000\u0522\u0523\u0003H\u001c\u0000\u0523\u0524\u0001\u0000"+ + "\u0000\u0000\u0524\u0525\u0006\u0097\u0010\u0000\u0525\u0526\u0006\u0097"+ + "\f\u0000\u0526\u013f\u0001\u0000\u0000\u0000\u0527\u0528\u0003t2\u0000"+ + "\u0528\u0529\u0001\u0000\u0000\u0000\u0529\u052a\u0006\u0098\u0017\u0000"+ + "\u052a\u0141\u0001\u0000\u0000\u0000\u052b\u052c\u0004\u0099\u000e\u0000"+ + "\u052c\u052d\u0003\u008c>\u0000\u052d\u052e\u0001\u0000\u0000\u0000\u052e"+ + "\u052f\u0006\u0099\u0018\u0000\u052f\u0143\u0001\u0000\u0000\u0000\u0530"+ + "\u0531\u0004\u009a\u000f\u0000\u0531\u0532\u0003\u00aeO\u0000\u0532\u0533"+ + 
"\u0001\u0000\u0000\u0000\u0533\u0534\u0006\u009a\u0019\u0000\u0534\u0145"+ + "\u0001\u0000\u0000\u0000\u0535\u0536\u0003\u00b8T\u0000\u0536\u0537\u0001"+ + "\u0000\u0000\u0000\u0537\u0538\u0006\u009b\u001f\u0000\u0538\u0147\u0001"+ + "\u0000\u0000\u0000\u0539\u053a\u0003\u00b4R\u0000\u053a\u053b\u0001\u0000"+ + "\u0000\u0000\u053b\u053c\u0006\u009c \u0000\u053c\u0149\u0001\u0000\u0000"+ + "\u0000\u053d\u053e\u0003B\u0019\u0000\u053e\u053f\u0001\u0000\u0000\u0000"+ + "\u053f\u0540\u0006\u009d\u000b\u0000\u0540\u014b\u0001\u0000\u0000\u0000"+ + "\u0541\u0542\u0003D\u001a\u0000\u0542\u0543\u0001\u0000\u0000\u0000\u0543"+ + "\u0544\u0006\u009e\u000b\u0000\u0544\u014d\u0001\u0000\u0000\u0000\u0545"+ + "\u0546\u0003F\u001b\u0000\u0546\u0547\u0001\u0000\u0000\u0000\u0547\u0548"+ + "\u0006\u009f\u000b\u0000\u0548\u014f\u0001\u0000\u0000\u0000\u0549\u054a"+ + "\u0003H\u001c\u0000\u054a\u054b\u0001\u0000\u0000\u0000\u054b\u054c\u0006"+ + "\u00a0\u0010\u0000\u054c\u054d\u0006\u00a0\f\u0000\u054d\u0151\u0001\u0000"+ + "\u0000\u0000\u054e\u054f\u0007\u0001\u0000\u0000\u054f\u0550\u0007\t\u0000"+ + "\u0000\u0550\u0551\u0007\u000f\u0000\u0000\u0551\u0552\u0007\u0007\u0000"+ + "\u0000\u0552\u0153\u0001\u0000\u0000\u0000\u0553\u0554\u0003B\u0019\u0000"+ + "\u0554\u0555\u0001\u0000\u0000\u0000\u0555\u0556\u0006\u00a2\u000b\u0000"+ + "\u0556\u0155\u0001\u0000\u0000\u0000\u0557\u0558\u0003D\u001a\u0000\u0558"+ + "\u0559\u0001\u0000\u0000\u0000\u0559\u055a\u0006\u00a3\u000b\u0000\u055a"+ + "\u0157\u0001\u0000\u0000\u0000\u055b\u055c\u0003F\u001b\u0000\u055c\u055d"+ + "\u0001\u0000\u0000\u0000\u055d\u055e\u0006\u00a4\u000b\u0000\u055e\u0159"+ + "\u0001\u0000\u0000\u0000\u055f\u0560\u0003\u00b2Q\u0000\u0560\u0561\u0001"+ + "\u0000\u0000\u0000\u0561\u0562\u0006\u00a5\u0011\u0000\u0562\u0563\u0006"+ + "\u00a5\f\u0000\u0563\u015b\u0001\u0000\u0000\u0000\u0564\u0565\u0003n"+ + "/\u0000\u0565\u0566\u0001\u0000\u0000\u0000\u0566\u0567\u0006\u00a6\u0012"+ + "\u0000\u0567\u015d\u0001\u0000\u0000\u0000\u0568\u056e\u0003T\"\u0000"+ + "\u0569\u056e\u0003J\u001d\u0000\u056a\u056e\u0003t2\u0000\u056b\u056e"+ + "\u0003L\u001e\u0000\u056c\u056e\u0003Z%\u0000\u056d\u0568\u0001\u0000"+ + "\u0000\u0000\u056d\u0569\u0001\u0000\u0000\u0000\u056d\u056a\u0001\u0000"+ + "\u0000\u0000\u056d\u056b\u0001\u0000\u0000\u0000\u056d\u056c\u0001\u0000"+ + "\u0000\u0000\u056e\u056f\u0001\u0000\u0000\u0000\u056f\u056d\u0001\u0000"+ + "\u0000\u0000\u056f\u0570\u0001\u0000\u0000\u0000\u0570\u015f\u0001\u0000"+ + "\u0000\u0000\u0571\u0572\u0003B\u0019\u0000\u0572\u0573\u0001\u0000\u0000"+ + "\u0000\u0573\u0574\u0006\u00a8\u000b\u0000\u0574\u0161\u0001\u0000\u0000"+ + "\u0000\u0575\u0576\u0003D\u001a\u0000\u0576\u0577\u0001\u0000\u0000\u0000"+ + "\u0577\u0578\u0006\u00a9\u000b\u0000\u0578\u0163\u0001\u0000\u0000\u0000"+ + "\u0579\u057a\u0003F\u001b\u0000\u057a\u057b\u0001\u0000\u0000\u0000\u057b"+ + "\u057c\u0006\u00aa\u000b\u0000\u057c\u0165\u0001\u0000\u0000\u0000\u057d"+ + "\u057e\u0003H\u001c\u0000\u057e\u057f\u0001\u0000\u0000\u0000\u057f\u0580"+ + "\u0006\u00ab\u0010\u0000\u0580\u0581\u0006\u00ab\f\u0000\u0581\u0167\u0001"+ + "\u0000\u0000\u0000\u0582\u0583\u0003n/\u0000\u0583\u0584\u0001\u0000\u0000"+ + "\u0000\u0584\u0585\u0006\u00ac\u0012\u0000\u0585\u0169\u0001\u0000\u0000"+ + "\u0000\u0586\u0587\u0003p0\u0000\u0587\u0588\u0001\u0000\u0000\u0000\u0588"+ + "\u0589\u0006\u00ad\u0013\u0000\u0589\u016b\u0001\u0000\u0000\u0000\u058a"+ + "\u058b\u0003t2\u0000\u058b\u058c\u0001\u0000\u0000\u0000\u058c\u058d\u0006"+ + 
"\u00ae\u0017\u0000\u058d\u016d\u0001\u0000\u0000\u0000\u058e\u058f\u0003"+ + "\u0116\u0083\u0000\u058f\u0590\u0001\u0000\u0000\u0000\u0590\u0591\u0006"+ + "\u00af!\u0000\u0591\u0592\u0006\u00af\"\u0000\u0592\u016f\u0001\u0000"+ + "\u0000\u0000\u0593\u0594\u0003\u00dae\u0000\u0594\u0595\u0001\u0000\u0000"+ + "\u0000\u0595\u0596\u0006\u00b0\u0015\u0000\u0596\u0171\u0001\u0000\u0000"+ + "\u0000\u0597\u0598\u0003^\'\u0000\u0598\u0599\u0001\u0000\u0000\u0000"+ + "\u0599\u059a\u0006\u00b1\u0016\u0000\u059a\u0173\u0001\u0000\u0000\u0000"+ + "\u059b\u059c\u0003B\u0019\u0000\u059c\u059d\u0001\u0000\u0000\u0000\u059d"+ + "\u059e\u0006\u00b2\u000b\u0000\u059e\u0175\u0001\u0000\u0000\u0000\u059f"+ + "\u05a0\u0003D\u001a\u0000\u05a0\u05a1\u0001\u0000\u0000\u0000\u05a1\u05a2"+ + "\u0006\u00b3\u000b\u0000\u05a2\u0177\u0001\u0000\u0000\u0000\u05a3\u05a4"+ + "\u0003F\u001b\u0000\u05a4\u05a5\u0001\u0000\u0000\u0000\u05a5\u05a6\u0006"+ + "\u00b4\u000b\u0000\u05a6\u0179\u0001\u0000\u0000\u0000\u05a7\u05a8\u0003"+ + "H\u001c\u0000\u05a8\u05a9\u0001\u0000\u0000\u0000\u05a9\u05aa\u0006\u00b5"+ + "\u0010\u0000\u05aa\u05ab\u0006\u00b5\f\u0000\u05ab\u05ac\u0006\u00b5\f"+ + "\u0000\u05ac\u017b\u0001\u0000\u0000\u0000\u05ad\u05ae\u0003p0\u0000\u05ae"+ + "\u05af\u0001\u0000\u0000\u0000\u05af\u05b0\u0006\u00b6\u0013\u0000\u05b0"+ + "\u017d\u0001\u0000\u0000\u0000\u05b1\u05b2\u0003t2\u0000\u05b2\u05b3\u0001"+ + "\u0000\u0000\u0000\u05b3\u05b4\u0006\u00b7\u0017\u0000\u05b4\u017f\u0001"+ + "\u0000\u0000\u0000\u05b5\u05b6\u0003\u00f4r\u0000\u05b6\u05b7\u0001\u0000"+ + "\u0000\u0000\u05b7\u05b8\u0006\u00b8\u001a\u0000\u05b8\u0181\u0001\u0000"+ + "\u0000\u0000\u05b9\u05ba\u0003B\u0019\u0000\u05ba\u05bb\u0001\u0000\u0000"+ + "\u0000\u05bb\u05bc\u0006\u00b9\u000b\u0000\u05bc\u0183\u0001\u0000\u0000"+ + "\u0000\u05bd\u05be\u0003D\u001a\u0000\u05be\u05bf\u0001\u0000\u0000\u0000"+ + "\u05bf\u05c0\u0006\u00ba\u000b\u0000\u05c0\u0185\u0001\u0000\u0000\u0000"+ + "\u05c1\u05c2\u0003F\u001b\u0000\u05c2\u05c3\u0001\u0000\u0000\u0000\u05c3"+ + "\u05c4\u0006\u00bb\u000b\u0000\u05c4\u0187\u0001\u0000\u0000\u0000\u05c5"+ + "\u05c6\u0003H\u001c\u0000\u05c6\u05c7\u0001\u0000\u0000\u0000\u05c7\u05c8"+ + "\u0006\u00bc\u0010\u0000\u05c8\u05c9\u0006\u00bc\f\u0000\u05c9\u0189\u0001"+ + "\u0000\u0000\u0000\u05ca\u05cb\u00036\u0013\u0000\u05cb\u05cc\u0001\u0000"+ + "\u0000\u0000\u05cc\u05cd\u0006\u00bd#\u0000\u05cd\u018b\u0001\u0000\u0000"+ + "\u0000\u05ce\u05cf\u0003\u0108|\u0000\u05cf\u05d0\u0001\u0000\u0000\u0000"+ + "\u05d0\u05d1\u0006\u00be$\u0000\u05d1\u018d\u0001\u0000\u0000\u0000\u05d2"+ + "\u05d3\u0003\u0116\u0083\u0000\u05d3\u05d4\u0001\u0000\u0000\u0000\u05d4"+ + "\u05d5\u0006\u00bf!\u0000\u05d5\u05d6\u0006\u00bf\f\u0000\u05d6\u05d7"+ + "\u0006\u00bf\u0000\u0000\u05d7\u018f\u0001\u0000\u0000\u0000\u05d8\u05d9"+ + "\u0007\u0014\u0000\u0000\u05d9\u05da\u0007\u0002\u0000\u0000\u05da\u05db"+ + "\u0007\u0001\u0000\u0000\u05db\u05dc\u0007\t\u0000\u0000\u05dc\u05dd\u0007"+ + "\u0011\u0000\u0000\u05dd\u05de\u0001\u0000\u0000\u0000\u05de\u05df\u0006"+ + "\u00c0\f\u0000\u05df\u05e0\u0006\u00c0\u0000\u0000\u05e0\u0191\u0001\u0000"+ + "\u0000\u0000\u05e1\u05e2\u0003\u00b4R\u0000\u05e2\u05e3\u0001\u0000\u0000"+ + "\u0000\u05e3\u05e4\u0006\u00c1 \u0000\u05e4\u0193\u0001\u0000\u0000\u0000"+ + "\u05e5\u05e6\u0003\u00b8T\u0000\u05e6\u05e7\u0001\u0000\u0000\u0000\u05e7"+ + "\u05e8\u0006\u00c2\u001f\u0000\u05e8\u0195\u0001\u0000\u0000\u0000\u05e9"+ + "\u05ea\u0003B\u0019\u0000\u05ea\u05eb\u0001\u0000\u0000\u0000\u05eb\u05ec"+ + 
"\u0006\u00c3\u000b\u0000\u05ec\u0197\u0001\u0000\u0000\u0000\u05ed\u05ee"+ + "\u0003D\u001a\u0000\u05ee\u05ef\u0001\u0000\u0000\u0000\u05ef\u05f0\u0006"+ + "\u00c4\u000b\u0000\u05f0\u0199\u0001\u0000\u0000\u0000\u05f1\u05f2\u0003"+ + "F\u001b\u0000\u05f2\u05f3\u0001\u0000\u0000\u0000\u05f3\u05f4\u0006\u00c5"+ + "\u000b\u0000\u05f4\u019b\u0001\u0000\u0000\u0000\u05f5\u05f6\u0003H\u001c"+ + "\u0000\u05f6\u05f7\u0001\u0000\u0000\u0000\u05f7\u05f8\u0006\u00c6\u0010"+ + "\u0000\u05f8\u05f9\u0006\u00c6\f\u0000\u05f9\u019d\u0001\u0000\u0000\u0000"+ + "\u05fa\u05fb\u0003\u00dae\u0000\u05fb\u05fc\u0001\u0000\u0000\u0000\u05fc"+ + "\u05fd\u0006\u00c7\u0015\u0000\u05fd\u05fe\u0006\u00c7\f\u0000\u05fe\u05ff"+ + "\u0006\u00c7%\u0000\u05ff\u019f\u0001\u0000\u0000\u0000\u0600\u0601\u0003"+ + "^\'\u0000\u0601\u0602\u0001\u0000\u0000\u0000\u0602\u0603\u0006\u00c8"+ + "\u0016\u0000\u0603\u0604\u0006\u00c8\f\u0000\u0604\u0605\u0006\u00c8%"+ + "\u0000\u0605\u01a1\u0001\u0000\u0000\u0000\u0606\u0607\u0003B\u0019\u0000"+ + "\u0607\u0608\u0001\u0000\u0000\u0000\u0608\u0609\u0006\u00c9\u000b\u0000"+ + "\u0609\u01a3\u0001\u0000\u0000\u0000\u060a\u060b\u0003D\u001a\u0000\u060b"+ + "\u060c\u0001\u0000\u0000\u0000\u060c\u060d\u0006\u00ca\u000b\u0000\u060d"+ + "\u01a5\u0001\u0000\u0000\u0000\u060e\u060f\u0003F\u001b\u0000\u060f\u0610"+ + "\u0001\u0000\u0000\u0000\u0610\u0611\u0006\u00cb\u000b\u0000\u0611\u01a7"+ + "\u0001\u0000\u0000\u0000\u0612\u0613\u0003n/\u0000\u0613\u0614\u0001\u0000"+ + "\u0000\u0000\u0614\u0615\u0006\u00cc\u0012\u0000\u0615\u0616\u0006\u00cc"+ + "\f\u0000\u0616\u0617\u0006\u00cc\t\u0000\u0617\u01a9\u0001\u0000\u0000"+ + "\u0000\u0618\u0619\u0003p0\u0000\u0619\u061a\u0001\u0000\u0000\u0000\u061a"+ + "\u061b\u0006\u00cd\u0013\u0000\u061b\u061c\u0006\u00cd\f\u0000\u061c\u061d"+ + "\u0006\u00cd\t\u0000\u061d\u01ab\u0001\u0000\u0000\u0000\u061e\u061f\u0003"+ + "B\u0019\u0000\u061f\u0620\u0001\u0000\u0000\u0000\u0620\u0621\u0006\u00ce"+ + "\u000b\u0000\u0621\u01ad\u0001\u0000\u0000\u0000\u0622\u0623\u0003D\u001a"+ + "\u0000\u0623\u0624\u0001\u0000\u0000\u0000\u0624\u0625\u0006\u00cf\u000b"+ + "\u0000\u0625\u01af\u0001\u0000\u0000\u0000\u0626\u0627\u0003F\u001b\u0000"+ + "\u0627\u0628\u0001\u0000\u0000\u0000\u0628\u0629\u0006\u00d0\u000b\u0000"+ + "\u0629\u01b1\u0001\u0000\u0000\u0000\u062a\u062b\u0003\u00b8T\u0000\u062b"+ + "\u062c\u0001\u0000\u0000\u0000\u062c\u062d\u0006\u00d1\f\u0000\u062d\u062e"+ + "\u0006\u00d1\u0000\u0000\u062e\u062f\u0006\u00d1\u001f\u0000\u062f\u01b3"+ + "\u0001\u0000\u0000\u0000\u0630\u0631\u0003\u00b4R\u0000\u0631\u0632\u0001"+ + "\u0000\u0000\u0000\u0632\u0633\u0006\u00d2\f\u0000\u0633\u0634\u0006\u00d2"+ + "\u0000\u0000\u0634\u0635\u0006\u00d2 \u0000\u0635\u01b5\u0001\u0000\u0000"+ + "\u0000\u0636\u0637\u0003d*\u0000\u0637\u0638\u0001\u0000\u0000\u0000\u0638"+ + "\u0639\u0006\u00d3\f\u0000\u0639\u063a\u0006\u00d3\u0000\u0000\u063a\u063b"+ + "\u0006\u00d3&\u0000\u063b\u01b7\u0001\u0000\u0000\u0000\u063c\u063d\u0003"+ + "H\u001c\u0000\u063d\u063e\u0001\u0000\u0000\u0000\u063e\u063f\u0006\u00d4"+ + "\u0010\u0000\u063f\u0640\u0006\u00d4\f\u0000\u0640\u01b9\u0001\u0000\u0000"+ + "\u0000B\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007\b\t\n\u000b\f"+ + "\r\u000e\u000f\u028f\u0299\u029d\u02a0\u02a9\u02ab\u02b6\u02c9\u02ce\u02d7"+ + "\u02de\u02e3\u02e5\u02f0\u02f8\u02fb\u02fd\u0302\u0307\u030d\u0314\u0319"+ + "\u031f\u0322\u032a\u032e\u03ae\u03b3\u03ba\u03bc\u03cc\u03d1\u03d6\u03d8"+ + "\u03de\u042b\u0430\u0461\u0465\u046a\u046f\u0474\u0476\u047a\u047c\u04d3"+ + 
"\u04d7\u04dc\u056d\u056f\'\u0005\u0001\u0000\u0005\u0004\u0000\u0005\u0006"+ + "\u0000\u0005\u0002\u0000\u0005\u0003\u0000\u0005\b\u0000\u0005\u0005\u0000"+ + "\u0005\t\u0000\u0005\u000b\u0000\u0005\u000e\u0000\u0005\r\u0000\u0000"+ + "\u0001\u0000\u0004\u0000\u0000\u0007\u0010\u0000\u0007F\u0000\u0005\u0000"+ + "\u0000\u0007\u001d\u0000\u0007G\u0000\u0007&\u0000\u0007\'\u0000\u0007"+ + "$\u0000\u0007Q\u0000\u0007\u001e\u0000\u0007)\u0000\u00075\u0000\u0007"+ + "E\u0000\u0007U\u0000\u0005\n\u0000\u0005\u0007\u0000\u0007_\u0000\u0007"+ + "^\u0000\u0007I\u0000\u0007H\u0000\u0007]\u0000\u0005\f\u0000\u0007\u0014"+ + "\u0000\u0007Y\u0000\u0005\u000f\u0000\u0007!\u0000"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.interp b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.interp index a2b339f378f12..50493f584fe4c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.interp +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.interp @@ -23,7 +23,11 @@ null null null null -':' +null +null +null +null +null '|' null null @@ -33,6 +37,7 @@ null 'asc' '=' '::' +':' ',' 'desc' '.' @@ -113,6 +118,10 @@ null null null null +'USING' +null +null +null null null null @@ -141,11 +150,15 @@ WHERE DEV_INLINESTATS DEV_LOOKUP DEV_METRICS +DEV_JOIN +DEV_JOIN_FULL +DEV_JOIN_LEFT +DEV_JOIN_RIGHT +DEV_JOIN_LOOKUP UNKNOWN_CMD LINE_COMMENT MULTILINE_COMMENT WS -COLON PIPE QUOTED_STRING INTEGER_LITERAL @@ -155,6 +168,7 @@ AND ASC ASSIGN CAST_OP +COLON COMMA DESC DOT @@ -235,6 +249,10 @@ LOOKUP_WS LOOKUP_FIELD_LINE_COMMENT LOOKUP_FIELD_MULTILINE_COMMENT LOOKUP_FIELD_WS +USING +JOIN_LINE_COMMENT +JOIN_MULTILINE_COMMENT +JOIN_WS METRICS_LINE_COMMENT METRICS_MULTILINE_COMMENT METRICS_WS @@ -305,7 +323,11 @@ enrichCommand enrichWithClause lookupCommand inlinestatsCommand +joinCommand +joinTarget +joinCondition +joinPredicate atn: -[4, 1, 119, 603, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, 57, 7, 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 134, 8, 1, 10, 1, 12, 1, 137, 9, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 3, 2, 145, 8, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 3, 163, 8, 3, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 3, 5, 175, 8, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 182, 8, 5, 10, 5, 12, 5, 185, 9, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 3, 5, 192, 8, 5, 1, 5, 1, 5, 1, 5, 1, 5, 3, 5, 198, 8, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 206, 8, 5, 10, 5, 12, 5, 209, 9, 5, 1, 6, 1, 
6, 3, 6, 213, 8, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 3, 6, 220, 8, 6, 1, 6, 1, 6, 1, 6, 3, 6, 225, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 236, 8, 8, 1, 9, 1, 9, 1, 9, 1, 9, 3, 9, 242, 8, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 5, 9, 250, 8, 9, 10, 9, 12, 9, 253, 9, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 3, 10, 263, 8, 10, 1, 10, 1, 10, 1, 10, 5, 10, 268, 8, 10, 10, 10, 12, 10, 271, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, 279, 8, 11, 10, 11, 12, 11, 282, 9, 11, 3, 11, 284, 8, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 5, 15, 298, 8, 15, 10, 15, 12, 15, 301, 9, 15, 1, 16, 1, 16, 1, 16, 3, 16, 306, 8, 16, 1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 5, 17, 314, 8, 17, 10, 17, 12, 17, 317, 9, 17, 1, 17, 3, 17, 320, 8, 17, 1, 18, 1, 18, 1, 18, 3, 18, 325, 8, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 3, 21, 335, 8, 21, 1, 22, 1, 22, 1, 22, 1, 22, 5, 22, 341, 8, 22, 10, 22, 12, 22, 344, 9, 22, 1, 23, 1, 23, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 24, 5, 24, 354, 8, 24, 10, 24, 12, 24, 357, 9, 24, 1, 24, 3, 24, 360, 8, 24, 1, 24, 1, 24, 3, 24, 364, 8, 24, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 3, 26, 371, 8, 26, 1, 26, 1, 26, 3, 26, 375, 8, 26, 1, 27, 1, 27, 1, 27, 5, 27, 380, 8, 27, 10, 27, 12, 27, 383, 9, 27, 1, 28, 1, 28, 1, 28, 3, 28, 388, 8, 28, 1, 29, 1, 29, 1, 29, 5, 29, 393, 8, 29, 10, 29, 12, 29, 396, 9, 29, 1, 30, 1, 30, 1, 30, 5, 30, 401, 8, 30, 10, 30, 12, 30, 404, 9, 30, 1, 31, 1, 31, 1, 31, 5, 31, 409, 8, 31, 10, 31, 12, 31, 412, 9, 31, 1, 32, 1, 32, 1, 33, 1, 33, 1, 33, 3, 33, 419, 8, 33, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 5, 34, 434, 8, 34, 10, 34, 12, 34, 437, 9, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 5, 34, 445, 8, 34, 10, 34, 12, 34, 448, 9, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 5, 34, 456, 8, 34, 10, 34, 12, 34, 459, 9, 34, 1, 34, 1, 34, 3, 34, 463, 8, 34, 1, 35, 1, 35, 3, 35, 467, 8, 35, 1, 36, 1, 36, 1, 36, 3, 36, 472, 8, 36, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 5, 38, 481, 8, 38, 10, 38, 12, 38, 484, 9, 38, 1, 39, 1, 39, 3, 39, 488, 8, 39, 1, 39, 1, 39, 3, 39, 492, 8, 39, 1, 40, 1, 40, 1, 40, 1, 41, 1, 41, 1, 41, 1, 42, 1, 42, 1, 42, 1, 42, 5, 42, 504, 8, 42, 10, 42, 12, 42, 507, 9, 42, 1, 43, 1, 43, 1, 43, 1, 43, 1, 44, 1, 44, 1, 44, 1, 44, 3, 44, 517, 8, 44, 1, 45, 1, 45, 1, 45, 1, 45, 1, 46, 1, 46, 1, 46, 1, 47, 1, 47, 1, 47, 5, 47, 529, 8, 47, 10, 47, 12, 47, 532, 9, 47, 1, 48, 1, 48, 1, 48, 1, 48, 1, 49, 1, 49, 1, 50, 1, 50, 3, 50, 542, 8, 50, 1, 51, 3, 51, 545, 8, 51, 1, 51, 1, 51, 1, 52, 3, 52, 550, 8, 52, 1, 52, 1, 52, 1, 53, 1, 53, 1, 54, 1, 54, 1, 55, 1, 55, 1, 55, 1, 56, 1, 56, 1, 56, 1, 56, 1, 57, 1, 57, 1, 57, 1, 58, 1, 58, 1, 58, 1, 58, 3, 58, 572, 8, 58, 1, 58, 1, 58, 1, 58, 1, 58, 5, 58, 578, 8, 58, 10, 58, 12, 58, 581, 9, 58, 3, 58, 583, 8, 58, 1, 59, 1, 59, 1, 59, 3, 59, 588, 8, 59, 1, 59, 1, 59, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 61, 1, 61, 1, 61, 1, 61, 3, 61, 601, 8, 61, 1, 61, 0, 4, 2, 10, 18, 20, 62, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 0, 8, 1, 0, 59, 60, 1, 0, 61, 63, 2, 0, 26, 26, 76, 76, 1, 0, 67, 68, 2, 0, 31, 31, 35, 35, 2, 0, 38, 38, 41, 41, 2, 0, 37, 37, 51, 51, 2, 0, 52, 52, 54, 58, 628, 0, 124, 1, 0, 0, 0, 2, 127, 1, 0, 0, 0, 4, 144, 
1, 0, 0, 0, 6, 162, 1, 0, 0, 0, 8, 164, 1, 0, 0, 0, 10, 197, 1, 0, 0, 0, 12, 224, 1, 0, 0, 0, 14, 226, 1, 0, 0, 0, 16, 235, 1, 0, 0, 0, 18, 241, 1, 0, 0, 0, 20, 262, 1, 0, 0, 0, 22, 272, 1, 0, 0, 0, 24, 287, 1, 0, 0, 0, 26, 289, 1, 0, 0, 0, 28, 291, 1, 0, 0, 0, 30, 294, 1, 0, 0, 0, 32, 305, 1, 0, 0, 0, 34, 309, 1, 0, 0, 0, 36, 324, 1, 0, 0, 0, 38, 328, 1, 0, 0, 0, 40, 330, 1, 0, 0, 0, 42, 334, 1, 0, 0, 0, 44, 336, 1, 0, 0, 0, 46, 345, 1, 0, 0, 0, 48, 349, 1, 0, 0, 0, 50, 365, 1, 0, 0, 0, 52, 368, 1, 0, 0, 0, 54, 376, 1, 0, 0, 0, 56, 384, 1, 0, 0, 0, 58, 389, 1, 0, 0, 0, 60, 397, 1, 0, 0, 0, 62, 405, 1, 0, 0, 0, 64, 413, 1, 0, 0, 0, 66, 418, 1, 0, 0, 0, 68, 462, 1, 0, 0, 0, 70, 466, 1, 0, 0, 0, 72, 471, 1, 0, 0, 0, 74, 473, 1, 0, 0, 0, 76, 476, 1, 0, 0, 0, 78, 485, 1, 0, 0, 0, 80, 493, 1, 0, 0, 0, 82, 496, 1, 0, 0, 0, 84, 499, 1, 0, 0, 0, 86, 508, 1, 0, 0, 0, 88, 512, 1, 0, 0, 0, 90, 518, 1, 0, 0, 0, 92, 522, 1, 0, 0, 0, 94, 525, 1, 0, 0, 0, 96, 533, 1, 0, 0, 0, 98, 537, 1, 0, 0, 0, 100, 541, 1, 0, 0, 0, 102, 544, 1, 0, 0, 0, 104, 549, 1, 0, 0, 0, 106, 553, 1, 0, 0, 0, 108, 555, 1, 0, 0, 0, 110, 557, 1, 0, 0, 0, 112, 560, 1, 0, 0, 0, 114, 564, 1, 0, 0, 0, 116, 567, 1, 0, 0, 0, 118, 587, 1, 0, 0, 0, 120, 591, 1, 0, 0, 0, 122, 596, 1, 0, 0, 0, 124, 125, 3, 2, 1, 0, 125, 126, 5, 0, 0, 1, 126, 1, 1, 0, 0, 0, 127, 128, 6, 1, -1, 0, 128, 129, 3, 4, 2, 0, 129, 135, 1, 0, 0, 0, 130, 131, 10, 1, 0, 0, 131, 132, 5, 25, 0, 0, 132, 134, 3, 6, 3, 0, 133, 130, 1, 0, 0, 0, 134, 137, 1, 0, 0, 0, 135, 133, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 3, 1, 0, 0, 0, 137, 135, 1, 0, 0, 0, 138, 145, 3, 110, 55, 0, 139, 145, 3, 34, 17, 0, 140, 145, 3, 28, 14, 0, 141, 145, 3, 114, 57, 0, 142, 143, 4, 2, 1, 0, 143, 145, 3, 48, 24, 0, 144, 138, 1, 0, 0, 0, 144, 139, 1, 0, 0, 0, 144, 140, 1, 0, 0, 0, 144, 141, 1, 0, 0, 0, 144, 142, 1, 0, 0, 0, 145, 5, 1, 0, 0, 0, 146, 163, 3, 50, 25, 0, 147, 163, 3, 8, 4, 0, 148, 163, 3, 80, 40, 0, 149, 163, 3, 74, 37, 0, 150, 163, 3, 52, 26, 0, 151, 163, 3, 76, 38, 0, 152, 163, 3, 82, 41, 0, 153, 163, 3, 84, 42, 0, 154, 163, 3, 88, 44, 0, 155, 163, 3, 90, 45, 0, 156, 163, 3, 116, 58, 0, 157, 163, 3, 92, 46, 0, 158, 159, 4, 3, 2, 0, 159, 163, 3, 122, 61, 0, 160, 161, 4, 3, 3, 0, 161, 163, 3, 120, 60, 0, 162, 146, 1, 0, 0, 0, 162, 147, 1, 0, 0, 0, 162, 148, 1, 0, 0, 0, 162, 149, 1, 0, 0, 0, 162, 150, 1, 0, 0, 0, 162, 151, 1, 0, 0, 0, 162, 152, 1, 0, 0, 0, 162, 153, 1, 0, 0, 0, 162, 154, 1, 0, 0, 0, 162, 155, 1, 0, 0, 0, 162, 156, 1, 0, 0, 0, 162, 157, 1, 0, 0, 0, 162, 158, 1, 0, 0, 0, 162, 160, 1, 0, 0, 0, 163, 7, 1, 0, 0, 0, 164, 165, 5, 16, 0, 0, 165, 166, 3, 10, 5, 0, 166, 9, 1, 0, 0, 0, 167, 168, 6, 5, -1, 0, 168, 169, 5, 44, 0, 0, 169, 198, 3, 10, 5, 8, 170, 198, 3, 16, 8, 0, 171, 198, 3, 12, 6, 0, 172, 174, 3, 16, 8, 0, 173, 175, 5, 44, 0, 0, 174, 173, 1, 0, 0, 0, 174, 175, 1, 0, 0, 0, 175, 176, 1, 0, 0, 0, 176, 177, 5, 39, 0, 0, 177, 178, 5, 43, 0, 0, 178, 183, 3, 16, 8, 0, 179, 180, 5, 34, 0, 0, 180, 182, 3, 16, 8, 0, 181, 179, 1, 0, 0, 0, 182, 185, 1, 0, 0, 0, 183, 181, 1, 0, 0, 0, 183, 184, 1, 0, 0, 0, 184, 186, 1, 0, 0, 0, 185, 183, 1, 0, 0, 0, 186, 187, 5, 50, 0, 0, 187, 198, 1, 0, 0, 0, 188, 189, 3, 16, 8, 0, 189, 191, 5, 40, 0, 0, 190, 192, 5, 44, 0, 0, 191, 190, 1, 0, 0, 0, 191, 192, 1, 0, 0, 0, 192, 193, 1, 0, 0, 0, 193, 194, 5, 45, 0, 0, 194, 198, 1, 0, 0, 0, 195, 196, 4, 5, 4, 0, 196, 198, 3, 14, 7, 0, 197, 167, 1, 0, 0, 0, 197, 170, 1, 0, 0, 0, 197, 171, 1, 0, 0, 0, 197, 172, 1, 0, 0, 0, 197, 188, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 207, 1, 0, 0, 0, 199, 200, 10, 5, 
0, 0, 200, 201, 5, 30, 0, 0, 201, 206, 3, 10, 5, 6, 202, 203, 10, 4, 0, 0, 203, 204, 5, 47, 0, 0, 204, 206, 3, 10, 5, 5, 205, 199, 1, 0, 0, 0, 205, 202, 1, 0, 0, 0, 206, 209, 1, 0, 0, 0, 207, 205, 1, 0, 0, 0, 207, 208, 1, 0, 0, 0, 208, 11, 1, 0, 0, 0, 209, 207, 1, 0, 0, 0, 210, 212, 3, 16, 8, 0, 211, 213, 5, 44, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0, 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 42, 0, 0, 215, 216, 3, 106, 53, 0, 216, 225, 1, 0, 0, 0, 217, 219, 3, 16, 8, 0, 218, 220, 5, 44, 0, 0, 219, 218, 1, 0, 0, 0, 219, 220, 1, 0, 0, 0, 220, 221, 1, 0, 0, 0, 221, 222, 5, 49, 0, 0, 222, 223, 3, 106, 53, 0, 223, 225, 1, 0, 0, 0, 224, 210, 1, 0, 0, 0, 224, 217, 1, 0, 0, 0, 225, 13, 1, 0, 0, 0, 226, 227, 3, 58, 29, 0, 227, 228, 5, 24, 0, 0, 228, 229, 3, 68, 34, 0, 229, 15, 1, 0, 0, 0, 230, 236, 3, 18, 9, 0, 231, 232, 3, 18, 9, 0, 232, 233, 3, 108, 54, 0, 233, 234, 3, 18, 9, 0, 234, 236, 1, 0, 0, 0, 235, 230, 1, 0, 0, 0, 235, 231, 1, 0, 0, 0, 236, 17, 1, 0, 0, 0, 237, 238, 6, 9, -1, 0, 238, 242, 3, 20, 10, 0, 239, 240, 7, 0, 0, 0, 240, 242, 3, 18, 9, 3, 241, 237, 1, 0, 0, 0, 241, 239, 1, 0, 0, 0, 242, 251, 1, 0, 0, 0, 243, 244, 10, 2, 0, 0, 244, 245, 7, 1, 0, 0, 245, 250, 3, 18, 9, 3, 246, 247, 10, 1, 0, 0, 247, 248, 7, 0, 0, 0, 248, 250, 3, 18, 9, 2, 249, 243, 1, 0, 0, 0, 249, 246, 1, 0, 0, 0, 250, 253, 1, 0, 0, 0, 251, 249, 1, 0, 0, 0, 251, 252, 1, 0, 0, 0, 252, 19, 1, 0, 0, 0, 253, 251, 1, 0, 0, 0, 254, 255, 6, 10, -1, 0, 255, 263, 3, 68, 34, 0, 256, 263, 3, 58, 29, 0, 257, 263, 3, 22, 11, 0, 258, 259, 5, 43, 0, 0, 259, 260, 3, 10, 5, 0, 260, 261, 5, 50, 0, 0, 261, 263, 1, 0, 0, 0, 262, 254, 1, 0, 0, 0, 262, 256, 1, 0, 0, 0, 262, 257, 1, 0, 0, 0, 262, 258, 1, 0, 0, 0, 263, 269, 1, 0, 0, 0, 264, 265, 10, 1, 0, 0, 265, 266, 5, 33, 0, 0, 266, 268, 3, 26, 13, 0, 267, 264, 1, 0, 0, 0, 268, 271, 1, 0, 0, 0, 269, 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 21, 1, 0, 0, 0, 271, 269, 1, 0, 0, 0, 272, 273, 3, 24, 12, 0, 273, 283, 5, 43, 0, 0, 274, 284, 5, 61, 0, 0, 275, 280, 3, 10, 5, 0, 276, 277, 5, 34, 0, 0, 277, 279, 3, 10, 5, 0, 278, 276, 1, 0, 0, 0, 279, 282, 1, 0, 0, 0, 280, 278, 1, 0, 0, 0, 280, 281, 1, 0, 0, 0, 281, 284, 1, 0, 0, 0, 282, 280, 1, 0, 0, 0, 283, 274, 1, 0, 0, 0, 283, 275, 1, 0, 0, 0, 283, 284, 1, 0, 0, 0, 284, 285, 1, 0, 0, 0, 285, 286, 5, 50, 0, 0, 286, 23, 1, 0, 0, 0, 287, 288, 3, 72, 36, 0, 288, 25, 1, 0, 0, 0, 289, 290, 3, 64, 32, 0, 290, 27, 1, 0, 0, 0, 291, 292, 5, 12, 0, 0, 292, 293, 3, 30, 15, 0, 293, 29, 1, 0, 0, 0, 294, 299, 3, 32, 16, 0, 295, 296, 5, 34, 0, 0, 296, 298, 3, 32, 16, 0, 297, 295, 1, 0, 0, 0, 298, 301, 1, 0, 0, 0, 299, 297, 1, 0, 0, 0, 299, 300, 1, 0, 0, 0, 300, 31, 1, 0, 0, 0, 301, 299, 1, 0, 0, 0, 302, 303, 3, 58, 29, 0, 303, 304, 5, 32, 0, 0, 304, 306, 1, 0, 0, 0, 305, 302, 1, 0, 0, 0, 305, 306, 1, 0, 0, 0, 306, 307, 1, 0, 0, 0, 307, 308, 3, 10, 5, 0, 308, 33, 1, 0, 0, 0, 309, 310, 5, 6, 0, 0, 310, 315, 3, 36, 18, 0, 311, 312, 5, 34, 0, 0, 312, 314, 3, 36, 18, 0, 313, 311, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316, 1, 0, 0, 0, 316, 319, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 320, 3, 42, 21, 0, 319, 318, 1, 0, 0, 0, 319, 320, 1, 0, 0, 0, 320, 35, 1, 0, 0, 0, 321, 322, 3, 38, 19, 0, 322, 323, 5, 24, 0, 0, 323, 325, 1, 0, 0, 0, 324, 321, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 326, 1, 0, 0, 0, 326, 327, 3, 40, 20, 0, 327, 37, 1, 0, 0, 0, 328, 329, 5, 76, 0, 0, 329, 39, 1, 0, 0, 0, 330, 331, 7, 2, 0, 0, 331, 41, 1, 0, 0, 0, 332, 335, 3, 44, 22, 0, 333, 335, 3, 46, 23, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0, 0, 335, 43, 1, 0, 0, 
0, 336, 337, 5, 75, 0, 0, 337, 342, 5, 76, 0, 0, 338, 339, 5, 34, 0, 0, 339, 341, 5, 76, 0, 0, 340, 338, 1, 0, 0, 0, 341, 344, 1, 0, 0, 0, 342, 340, 1, 0, 0, 0, 342, 343, 1, 0, 0, 0, 343, 45, 1, 0, 0, 0, 344, 342, 1, 0, 0, 0, 345, 346, 5, 65, 0, 0, 346, 347, 3, 44, 22, 0, 347, 348, 5, 66, 0, 0, 348, 47, 1, 0, 0, 0, 349, 350, 5, 19, 0, 0, 350, 355, 3, 36, 18, 0, 351, 352, 5, 34, 0, 0, 352, 354, 3, 36, 18, 0, 353, 351, 1, 0, 0, 0, 354, 357, 1, 0, 0, 0, 355, 353, 1, 0, 0, 0, 355, 356, 1, 0, 0, 0, 356, 359, 1, 0, 0, 0, 357, 355, 1, 0, 0, 0, 358, 360, 3, 54, 27, 0, 359, 358, 1, 0, 0, 0, 359, 360, 1, 0, 0, 0, 360, 363, 1, 0, 0, 0, 361, 362, 5, 29, 0, 0, 362, 364, 3, 30, 15, 0, 363, 361, 1, 0, 0, 0, 363, 364, 1, 0, 0, 0, 364, 49, 1, 0, 0, 0, 365, 366, 5, 4, 0, 0, 366, 367, 3, 30, 15, 0, 367, 51, 1, 0, 0, 0, 368, 370, 5, 15, 0, 0, 369, 371, 3, 54, 27, 0, 370, 369, 1, 0, 0, 0, 370, 371, 1, 0, 0, 0, 371, 374, 1, 0, 0, 0, 372, 373, 5, 29, 0, 0, 373, 375, 3, 30, 15, 0, 374, 372, 1, 0, 0, 0, 374, 375, 1, 0, 0, 0, 375, 53, 1, 0, 0, 0, 376, 381, 3, 56, 28, 0, 377, 378, 5, 34, 0, 0, 378, 380, 3, 56, 28, 0, 379, 377, 1, 0, 0, 0, 380, 383, 1, 0, 0, 0, 381, 379, 1, 0, 0, 0, 381, 382, 1, 0, 0, 0, 382, 55, 1, 0, 0, 0, 383, 381, 1, 0, 0, 0, 384, 387, 3, 32, 16, 0, 385, 386, 5, 16, 0, 0, 386, 388, 3, 10, 5, 0, 387, 385, 1, 0, 0, 0, 387, 388, 1, 0, 0, 0, 388, 57, 1, 0, 0, 0, 389, 394, 3, 72, 36, 0, 390, 391, 5, 36, 0, 0, 391, 393, 3, 72, 36, 0, 392, 390, 1, 0, 0, 0, 393, 396, 1, 0, 0, 0, 394, 392, 1, 0, 0, 0, 394, 395, 1, 0, 0, 0, 395, 59, 1, 0, 0, 0, 396, 394, 1, 0, 0, 0, 397, 402, 3, 66, 33, 0, 398, 399, 5, 36, 0, 0, 399, 401, 3, 66, 33, 0, 400, 398, 1, 0, 0, 0, 401, 404, 1, 0, 0, 0, 402, 400, 1, 0, 0, 0, 402, 403, 1, 0, 0, 0, 403, 61, 1, 0, 0, 0, 404, 402, 1, 0, 0, 0, 405, 410, 3, 60, 30, 0, 406, 407, 5, 34, 0, 0, 407, 409, 3, 60, 30, 0, 408, 406, 1, 0, 0, 0, 409, 412, 1, 0, 0, 0, 410, 408, 1, 0, 0, 0, 410, 411, 1, 0, 0, 0, 411, 63, 1, 0, 0, 0, 412, 410, 1, 0, 0, 0, 413, 414, 7, 3, 0, 0, 414, 65, 1, 0, 0, 0, 415, 419, 5, 80, 0, 0, 416, 417, 4, 33, 10, 0, 417, 419, 3, 70, 35, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 419, 67, 1, 0, 0, 0, 420, 463, 5, 45, 0, 0, 421, 422, 3, 104, 52, 0, 422, 423, 5, 67, 0, 0, 423, 463, 1, 0, 0, 0, 424, 463, 3, 102, 51, 0, 425, 463, 3, 104, 52, 0, 426, 463, 3, 98, 49, 0, 427, 463, 3, 70, 35, 0, 428, 463, 3, 106, 53, 0, 429, 430, 5, 65, 0, 0, 430, 435, 3, 100, 50, 0, 431, 432, 5, 34, 0, 0, 432, 434, 3, 100, 50, 0, 433, 431, 1, 0, 0, 0, 434, 437, 1, 0, 0, 0, 435, 433, 1, 0, 0, 0, 435, 436, 1, 0, 0, 0, 436, 438, 1, 0, 0, 0, 437, 435, 1, 0, 0, 0, 438, 439, 5, 66, 0, 0, 439, 463, 1, 0, 0, 0, 440, 441, 5, 65, 0, 0, 441, 446, 3, 98, 49, 0, 442, 443, 5, 34, 0, 0, 443, 445, 3, 98, 49, 0, 444, 442, 1, 0, 0, 0, 445, 448, 1, 0, 0, 0, 446, 444, 1, 0, 0, 0, 446, 447, 1, 0, 0, 0, 447, 449, 1, 0, 0, 0, 448, 446, 1, 0, 0, 0, 449, 450, 5, 66, 0, 0, 450, 463, 1, 0, 0, 0, 451, 452, 5, 65, 0, 0, 452, 457, 3, 106, 53, 0, 453, 454, 5, 34, 0, 0, 454, 456, 3, 106, 53, 0, 455, 453, 1, 0, 0, 0, 456, 459, 1, 0, 0, 0, 457, 455, 1, 0, 0, 0, 457, 458, 1, 0, 0, 0, 458, 460, 1, 0, 0, 0, 459, 457, 1, 0, 0, 0, 460, 461, 5, 66, 0, 0, 461, 463, 1, 0, 0, 0, 462, 420, 1, 0, 0, 0, 462, 421, 1, 0, 0, 0, 462, 424, 1, 0, 0, 0, 462, 425, 1, 0, 0, 0, 462, 426, 1, 0, 0, 0, 462, 427, 1, 0, 0, 0, 462, 428, 1, 0, 0, 0, 462, 429, 1, 0, 0, 0, 462, 440, 1, 0, 0, 0, 462, 451, 1, 0, 0, 0, 463, 69, 1, 0, 0, 0, 464, 467, 5, 48, 0, 0, 465, 467, 5, 64, 0, 0, 466, 464, 1, 0, 0, 0, 466, 465, 1, 0, 0, 0, 467, 71, 1, 0, 0, 0, 468, 472, 
3, 64, 32, 0, 469, 470, 4, 36, 11, 0, 470, 472, 3, 70, 35, 0, 471, 468, 1, 0, 0, 0, 471, 469, 1, 0, 0, 0, 472, 73, 1, 0, 0, 0, 473, 474, 5, 9, 0, 0, 474, 475, 5, 27, 0, 0, 475, 75, 1, 0, 0, 0, 476, 477, 5, 14, 0, 0, 477, 482, 3, 78, 39, 0, 478, 479, 5, 34, 0, 0, 479, 481, 3, 78, 39, 0, 480, 478, 1, 0, 0, 0, 481, 484, 1, 0, 0, 0, 482, 480, 1, 0, 0, 0, 482, 483, 1, 0, 0, 0, 483, 77, 1, 0, 0, 0, 484, 482, 1, 0, 0, 0, 485, 487, 3, 10, 5, 0, 486, 488, 7, 4, 0, 0, 487, 486, 1, 0, 0, 0, 487, 488, 1, 0, 0, 0, 488, 491, 1, 0, 0, 0, 489, 490, 5, 46, 0, 0, 490, 492, 7, 5, 0, 0, 491, 489, 1, 0, 0, 0, 491, 492, 1, 0, 0, 0, 492, 79, 1, 0, 0, 0, 493, 494, 5, 8, 0, 0, 494, 495, 3, 62, 31, 0, 495, 81, 1, 0, 0, 0, 496, 497, 5, 2, 0, 0, 497, 498, 3, 62, 31, 0, 498, 83, 1, 0, 0, 0, 499, 500, 5, 11, 0, 0, 500, 505, 3, 86, 43, 0, 501, 502, 5, 34, 0, 0, 502, 504, 3, 86, 43, 0, 503, 501, 1, 0, 0, 0, 504, 507, 1, 0, 0, 0, 505, 503, 1, 0, 0, 0, 505, 506, 1, 0, 0, 0, 506, 85, 1, 0, 0, 0, 507, 505, 1, 0, 0, 0, 508, 509, 3, 60, 30, 0, 509, 510, 5, 84, 0, 0, 510, 511, 3, 60, 30, 0, 511, 87, 1, 0, 0, 0, 512, 513, 5, 1, 0, 0, 513, 514, 3, 20, 10, 0, 514, 516, 3, 106, 53, 0, 515, 517, 3, 94, 47, 0, 516, 515, 1, 0, 0, 0, 516, 517, 1, 0, 0, 0, 517, 89, 1, 0, 0, 0, 518, 519, 5, 7, 0, 0, 519, 520, 3, 20, 10, 0, 520, 521, 3, 106, 53, 0, 521, 91, 1, 0, 0, 0, 522, 523, 5, 10, 0, 0, 523, 524, 3, 58, 29, 0, 524, 93, 1, 0, 0, 0, 525, 530, 3, 96, 48, 0, 526, 527, 5, 34, 0, 0, 527, 529, 3, 96, 48, 0, 528, 526, 1, 0, 0, 0, 529, 532, 1, 0, 0, 0, 530, 528, 1, 0, 0, 0, 530, 531, 1, 0, 0, 0, 531, 95, 1, 0, 0, 0, 532, 530, 1, 0, 0, 0, 533, 534, 3, 64, 32, 0, 534, 535, 5, 32, 0, 0, 535, 536, 3, 68, 34, 0, 536, 97, 1, 0, 0, 0, 537, 538, 7, 6, 0, 0, 538, 99, 1, 0, 0, 0, 539, 542, 3, 102, 51, 0, 540, 542, 3, 104, 52, 0, 541, 539, 1, 0, 0, 0, 541, 540, 1, 0, 0, 0, 542, 101, 1, 0, 0, 0, 543, 545, 7, 0, 0, 0, 544, 543, 1, 0, 0, 0, 544, 545, 1, 0, 0, 0, 545, 546, 1, 0, 0, 0, 546, 547, 5, 28, 0, 0, 547, 103, 1, 0, 0, 0, 548, 550, 7, 0, 0, 0, 549, 548, 1, 0, 0, 0, 549, 550, 1, 0, 0, 0, 550, 551, 1, 0, 0, 0, 551, 552, 5, 27, 0, 0, 552, 105, 1, 0, 0, 0, 553, 554, 5, 26, 0, 0, 554, 107, 1, 0, 0, 0, 555, 556, 7, 7, 0, 0, 556, 109, 1, 0, 0, 0, 557, 558, 5, 5, 0, 0, 558, 559, 3, 112, 56, 0, 559, 111, 1, 0, 0, 0, 560, 561, 5, 65, 0, 0, 561, 562, 3, 2, 1, 0, 562, 563, 5, 66, 0, 0, 563, 113, 1, 0, 0, 0, 564, 565, 5, 13, 0, 0, 565, 566, 5, 100, 0, 0, 566, 115, 1, 0, 0, 0, 567, 568, 5, 3, 0, 0, 568, 571, 5, 90, 0, 0, 569, 570, 5, 88, 0, 0, 570, 572, 3, 60, 30, 0, 571, 569, 1, 0, 0, 0, 571, 572, 1, 0, 0, 0, 572, 582, 1, 0, 0, 0, 573, 574, 5, 89, 0, 0, 574, 579, 3, 118, 59, 0, 575, 576, 5, 34, 0, 0, 576, 578, 3, 118, 59, 0, 577, 575, 1, 0, 0, 0, 578, 581, 1, 0, 0, 0, 579, 577, 1, 0, 0, 0, 579, 580, 1, 0, 0, 0, 580, 583, 1, 0, 0, 0, 581, 579, 1, 0, 0, 0, 582, 573, 1, 0, 0, 0, 582, 583, 1, 0, 0, 0, 583, 117, 1, 0, 0, 0, 584, 585, 3, 60, 30, 0, 585, 586, 5, 32, 0, 0, 586, 588, 1, 0, 0, 0, 587, 584, 1, 0, 0, 0, 587, 588, 1, 0, 0, 0, 588, 589, 1, 0, 0, 0, 589, 590, 3, 60, 30, 0, 590, 119, 1, 0, 0, 0, 591, 592, 5, 18, 0, 0, 592, 593, 3, 36, 18, 0, 593, 594, 5, 88, 0, 0, 594, 595, 3, 62, 31, 0, 595, 121, 1, 0, 0, 0, 596, 597, 5, 17, 0, 0, 597, 600, 3, 54, 27, 0, 598, 599, 5, 29, 0, 0, 599, 601, 3, 30, 15, 0, 600, 598, 1, 0, 0, 0, 600, 601, 1, 0, 0, 0, 601, 123, 1, 0, 0, 0, 58, 135, 144, 162, 174, 183, 191, 197, 205, 207, 212, 219, 224, 235, 241, 249, 251, 262, 269, 280, 283, 299, 305, 315, 319, 324, 334, 342, 355, 359, 363, 370, 374, 381, 387, 394, 402, 410, 418, 435, 
446, 457, 462, 466, 471, 482, 487, 491, 505, 516, 530, 541, 544, 549, 571, 579, 582, 587, 600] \ No newline at end of file +[4, 1, 128, 635, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, 57, 7, 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, 7, 62, 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 142, 8, 1, 10, 1, 12, 1, 145, 9, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 3, 2, 153, 8, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 3, 173, 8, 3, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 3, 5, 185, 8, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 192, 8, 5, 10, 5, 12, 5, 195, 9, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 3, 5, 202, 8, 5, 1, 5, 1, 5, 1, 5, 3, 5, 207, 8, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 215, 8, 5, 10, 5, 12, 5, 218, 9, 5, 1, 6, 1, 6, 3, 6, 222, 8, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 3, 6, 229, 8, 6, 1, 6, 1, 6, 1, 6, 3, 6, 234, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 245, 8, 8, 1, 9, 1, 9, 1, 9, 1, 9, 3, 9, 251, 8, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 5, 9, 259, 8, 9, 10, 9, 12, 9, 262, 9, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 3, 10, 272, 8, 10, 1, 10, 1, 10, 1, 10, 5, 10, 277, 8, 10, 10, 10, 12, 10, 280, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, 288, 8, 11, 10, 11, 12, 11, 291, 9, 11, 3, 11, 293, 8, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 5, 15, 307, 8, 15, 10, 15, 12, 15, 310, 9, 15, 1, 16, 1, 16, 1, 16, 3, 16, 315, 8, 16, 1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 5, 17, 323, 8, 17, 10, 17, 12, 17, 326, 9, 17, 1, 17, 3, 17, 329, 8, 17, 1, 18, 1, 18, 1, 18, 3, 18, 334, 8, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 3, 21, 344, 8, 21, 1, 22, 1, 22, 1, 22, 1, 22, 5, 22, 350, 8, 22, 10, 22, 12, 22, 353, 9, 22, 1, 23, 1, 23, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 24, 5, 24, 363, 8, 24, 10, 24, 12, 24, 366, 9, 24, 1, 24, 3, 24, 369, 8, 24, 1, 24, 1, 24, 3, 24, 373, 8, 24, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 3, 26, 380, 8, 26, 1, 26, 1, 26, 3, 26, 384, 8, 26, 1, 27, 1, 27, 1, 27, 5, 27, 389, 8, 27, 10, 27, 12, 27, 392, 9, 27, 1, 28, 1, 28, 1, 28, 3, 28, 397, 8, 28, 1, 29, 1, 29, 1, 29, 5, 29, 402, 8, 29, 10, 29, 12, 29, 405, 9, 29, 1, 30, 1, 30, 1, 30, 5, 30, 410, 8, 30, 10, 30, 12, 30, 413, 9, 30, 1, 31, 1, 31, 1, 31, 5, 31, 418, 8, 31, 10, 31, 12, 31, 421, 9, 31, 1, 32, 1, 32, 1, 33, 1, 33, 1, 33, 3, 33, 428, 8, 33, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 5, 34, 443, 8, 34, 10, 34, 12, 34, 446, 9, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 5, 34, 454, 8, 34, 10, 34, 12, 34, 457, 9, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 5, 34, 465, 8, 
34, 10, 34, 12, 34, 468, 9, 34, 1, 34, 1, 34, 3, 34, 472, 8, 34, 1, 35, 1, 35, 3, 35, 476, 8, 35, 1, 36, 1, 36, 1, 36, 3, 36, 481, 8, 36, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 5, 38, 490, 8, 38, 10, 38, 12, 38, 493, 9, 38, 1, 39, 1, 39, 3, 39, 497, 8, 39, 1, 39, 1, 39, 3, 39, 501, 8, 39, 1, 40, 1, 40, 1, 40, 1, 41, 1, 41, 1, 41, 1, 42, 1, 42, 1, 42, 1, 42, 5, 42, 513, 8, 42, 10, 42, 12, 42, 516, 9, 42, 1, 43, 1, 43, 1, 43, 1, 43, 1, 44, 1, 44, 1, 44, 1, 44, 3, 44, 526, 8, 44, 1, 45, 1, 45, 1, 45, 1, 45, 1, 46, 1, 46, 1, 46, 1, 47, 1, 47, 1, 47, 5, 47, 538, 8, 47, 10, 47, 12, 47, 541, 9, 47, 1, 48, 1, 48, 1, 48, 1, 48, 1, 49, 1, 49, 1, 50, 1, 50, 3, 50, 551, 8, 50, 1, 51, 3, 51, 554, 8, 51, 1, 51, 1, 51, 1, 52, 3, 52, 559, 8, 52, 1, 52, 1, 52, 1, 53, 1, 53, 1, 54, 1, 54, 1, 55, 1, 55, 1, 55, 1, 56, 1, 56, 1, 56, 1, 56, 1, 57, 1, 57, 1, 57, 1, 58, 1, 58, 1, 58, 1, 58, 3, 58, 581, 8, 58, 1, 58, 1, 58, 1, 58, 1, 58, 5, 58, 587, 8, 58, 10, 58, 12, 58, 590, 9, 58, 3, 58, 592, 8, 58, 1, 59, 1, 59, 1, 59, 3, 59, 597, 8, 59, 1, 59, 1, 59, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 61, 1, 61, 1, 61, 1, 61, 3, 61, 610, 8, 61, 1, 62, 3, 62, 613, 8, 62, 1, 62, 1, 62, 1, 62, 1, 62, 1, 63, 1, 63, 1, 63, 3, 63, 622, 8, 63, 1, 64, 1, 64, 1, 64, 1, 64, 5, 64, 628, 8, 64, 10, 64, 12, 64, 631, 9, 64, 1, 65, 1, 65, 1, 65, 0, 4, 2, 10, 18, 20, 66, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 0, 9, 1, 0, 64, 65, 1, 0, 66, 68, 2, 0, 30, 30, 81, 81, 1, 0, 72, 73, 2, 0, 35, 35, 40, 40, 2, 0, 43, 43, 46, 46, 2, 0, 42, 42, 56, 56, 2, 0, 57, 57, 59, 63, 1, 0, 22, 24, 660, 0, 132, 1, 0, 0, 0, 2, 135, 1, 0, 0, 0, 4, 152, 1, 0, 0, 0, 6, 172, 1, 0, 0, 0, 8, 174, 1, 0, 0, 0, 10, 206, 1, 0, 0, 0, 12, 233, 1, 0, 0, 0, 14, 235, 1, 0, 0, 0, 16, 244, 1, 0, 0, 0, 18, 250, 1, 0, 0, 0, 20, 271, 1, 0, 0, 0, 22, 281, 1, 0, 0, 0, 24, 296, 1, 0, 0, 0, 26, 298, 1, 0, 0, 0, 28, 300, 1, 0, 0, 0, 30, 303, 1, 0, 0, 0, 32, 314, 1, 0, 0, 0, 34, 318, 1, 0, 0, 0, 36, 333, 1, 0, 0, 0, 38, 337, 1, 0, 0, 0, 40, 339, 1, 0, 0, 0, 42, 343, 1, 0, 0, 0, 44, 345, 1, 0, 0, 0, 46, 354, 1, 0, 0, 0, 48, 358, 1, 0, 0, 0, 50, 374, 1, 0, 0, 0, 52, 377, 1, 0, 0, 0, 54, 385, 1, 0, 0, 0, 56, 393, 1, 0, 0, 0, 58, 398, 1, 0, 0, 0, 60, 406, 1, 0, 0, 0, 62, 414, 1, 0, 0, 0, 64, 422, 1, 0, 0, 0, 66, 427, 1, 0, 0, 0, 68, 471, 1, 0, 0, 0, 70, 475, 1, 0, 0, 0, 72, 480, 1, 0, 0, 0, 74, 482, 1, 0, 0, 0, 76, 485, 1, 0, 0, 0, 78, 494, 1, 0, 0, 0, 80, 502, 1, 0, 0, 0, 82, 505, 1, 0, 0, 0, 84, 508, 1, 0, 0, 0, 86, 517, 1, 0, 0, 0, 88, 521, 1, 0, 0, 0, 90, 527, 1, 0, 0, 0, 92, 531, 1, 0, 0, 0, 94, 534, 1, 0, 0, 0, 96, 542, 1, 0, 0, 0, 98, 546, 1, 0, 0, 0, 100, 550, 1, 0, 0, 0, 102, 553, 1, 0, 0, 0, 104, 558, 1, 0, 0, 0, 106, 562, 1, 0, 0, 0, 108, 564, 1, 0, 0, 0, 110, 566, 1, 0, 0, 0, 112, 569, 1, 0, 0, 0, 114, 573, 1, 0, 0, 0, 116, 576, 1, 0, 0, 0, 118, 596, 1, 0, 0, 0, 120, 600, 1, 0, 0, 0, 122, 605, 1, 0, 0, 0, 124, 612, 1, 0, 0, 0, 126, 618, 1, 0, 0, 0, 128, 623, 1, 0, 0, 0, 130, 632, 1, 0, 0, 0, 132, 133, 3, 2, 1, 0, 133, 134, 5, 0, 0, 1, 134, 1, 1, 0, 0, 0, 135, 136, 6, 1, -1, 0, 136, 137, 3, 4, 2, 0, 137, 143, 1, 0, 0, 0, 138, 139, 10, 1, 0, 0, 139, 140, 5, 29, 0, 0, 140, 142, 3, 6, 3, 0, 141, 138, 1, 0, 0, 0, 142, 145, 1, 0, 0, 0, 143, 141, 1, 0, 0, 0, 143, 144, 1, 0, 0, 0, 144, 3, 1, 0, 0, 0, 145, 143, 1, 0, 0, 0, 146, 153, 3, 110, 55, 
0, 147, 153, 3, 34, 17, 0, 148, 153, 3, 28, 14, 0, 149, 153, 3, 114, 57, 0, 150, 151, 4, 2, 1, 0, 151, 153, 3, 48, 24, 0, 152, 146, 1, 0, 0, 0, 152, 147, 1, 0, 0, 0, 152, 148, 1, 0, 0, 0, 152, 149, 1, 0, 0, 0, 152, 150, 1, 0, 0, 0, 153, 5, 1, 0, 0, 0, 154, 173, 3, 50, 25, 0, 155, 173, 3, 8, 4, 0, 156, 173, 3, 80, 40, 0, 157, 173, 3, 74, 37, 0, 158, 173, 3, 52, 26, 0, 159, 173, 3, 76, 38, 0, 160, 173, 3, 82, 41, 0, 161, 173, 3, 84, 42, 0, 162, 173, 3, 88, 44, 0, 163, 173, 3, 90, 45, 0, 164, 173, 3, 116, 58, 0, 165, 173, 3, 92, 46, 0, 166, 167, 4, 3, 2, 0, 167, 173, 3, 122, 61, 0, 168, 169, 4, 3, 3, 0, 169, 173, 3, 120, 60, 0, 170, 171, 4, 3, 4, 0, 171, 173, 3, 124, 62, 0, 172, 154, 1, 0, 0, 0, 172, 155, 1, 0, 0, 0, 172, 156, 1, 0, 0, 0, 172, 157, 1, 0, 0, 0, 172, 158, 1, 0, 0, 0, 172, 159, 1, 0, 0, 0, 172, 160, 1, 0, 0, 0, 172, 161, 1, 0, 0, 0, 172, 162, 1, 0, 0, 0, 172, 163, 1, 0, 0, 0, 172, 164, 1, 0, 0, 0, 172, 165, 1, 0, 0, 0, 172, 166, 1, 0, 0, 0, 172, 168, 1, 0, 0, 0, 172, 170, 1, 0, 0, 0, 173, 7, 1, 0, 0, 0, 174, 175, 5, 16, 0, 0, 175, 176, 3, 10, 5, 0, 176, 9, 1, 0, 0, 0, 177, 178, 6, 5, -1, 0, 178, 179, 5, 49, 0, 0, 179, 207, 3, 10, 5, 8, 180, 207, 3, 16, 8, 0, 181, 207, 3, 12, 6, 0, 182, 184, 3, 16, 8, 0, 183, 185, 5, 49, 0, 0, 184, 183, 1, 0, 0, 0, 184, 185, 1, 0, 0, 0, 185, 186, 1, 0, 0, 0, 186, 187, 5, 44, 0, 0, 187, 188, 5, 48, 0, 0, 188, 193, 3, 16, 8, 0, 189, 190, 5, 39, 0, 0, 190, 192, 3, 16, 8, 0, 191, 189, 1, 0, 0, 0, 192, 195, 1, 0, 0, 0, 193, 191, 1, 0, 0, 0, 193, 194, 1, 0, 0, 0, 194, 196, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0, 196, 197, 5, 55, 0, 0, 197, 207, 1, 0, 0, 0, 198, 199, 3, 16, 8, 0, 199, 201, 5, 45, 0, 0, 200, 202, 5, 49, 0, 0, 201, 200, 1, 0, 0, 0, 201, 202, 1, 0, 0, 0, 202, 203, 1, 0, 0, 0, 203, 204, 5, 50, 0, 0, 204, 207, 1, 0, 0, 0, 205, 207, 3, 14, 7, 0, 206, 177, 1, 0, 0, 0, 206, 180, 1, 0, 0, 0, 206, 181, 1, 0, 0, 0, 206, 182, 1, 0, 0, 0, 206, 198, 1, 0, 0, 0, 206, 205, 1, 0, 0, 0, 207, 216, 1, 0, 0, 0, 208, 209, 10, 5, 0, 0, 209, 210, 5, 34, 0, 0, 210, 215, 3, 10, 5, 6, 211, 212, 10, 4, 0, 0, 212, 213, 5, 52, 0, 0, 213, 215, 3, 10, 5, 5, 214, 208, 1, 0, 0, 0, 214, 211, 1, 0, 0, 0, 215, 218, 1, 0, 0, 0, 216, 214, 1, 0, 0, 0, 216, 217, 1, 0, 0, 0, 217, 11, 1, 0, 0, 0, 218, 216, 1, 0, 0, 0, 219, 221, 3, 16, 8, 0, 220, 222, 5, 49, 0, 0, 221, 220, 1, 0, 0, 0, 221, 222, 1, 0, 0, 0, 222, 223, 1, 0, 0, 0, 223, 224, 5, 47, 0, 0, 224, 225, 3, 106, 53, 0, 225, 234, 1, 0, 0, 0, 226, 228, 3, 16, 8, 0, 227, 229, 5, 49, 0, 0, 228, 227, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 230, 1, 0, 0, 0, 230, 231, 5, 54, 0, 0, 231, 232, 3, 106, 53, 0, 232, 234, 1, 0, 0, 0, 233, 219, 1, 0, 0, 0, 233, 226, 1, 0, 0, 0, 234, 13, 1, 0, 0, 0, 235, 236, 3, 58, 29, 0, 236, 237, 5, 38, 0, 0, 237, 238, 3, 68, 34, 0, 238, 15, 1, 0, 0, 0, 239, 245, 3, 18, 9, 0, 240, 241, 3, 18, 9, 0, 241, 242, 3, 108, 54, 0, 242, 243, 3, 18, 9, 0, 243, 245, 1, 0, 0, 0, 244, 239, 1, 0, 0, 0, 244, 240, 1, 0, 0, 0, 245, 17, 1, 0, 0, 0, 246, 247, 6, 9, -1, 0, 247, 251, 3, 20, 10, 0, 248, 249, 7, 0, 0, 0, 249, 251, 3, 18, 9, 3, 250, 246, 1, 0, 0, 0, 250, 248, 1, 0, 0, 0, 251, 260, 1, 0, 0, 0, 252, 253, 10, 2, 0, 0, 253, 254, 7, 1, 0, 0, 254, 259, 3, 18, 9, 3, 255, 256, 10, 1, 0, 0, 256, 257, 7, 0, 0, 0, 257, 259, 3, 18, 9, 2, 258, 252, 1, 0, 0, 0, 258, 255, 1, 0, 0, 0, 259, 262, 1, 0, 0, 0, 260, 258, 1, 0, 0, 0, 260, 261, 1, 0, 0, 0, 261, 19, 1, 0, 0, 0, 262, 260, 1, 0, 0, 0, 263, 264, 6, 10, -1, 0, 264, 272, 3, 68, 34, 0, 265, 272, 3, 58, 29, 0, 266, 272, 3, 22, 11, 0, 267, 268, 5, 48, 0, 0, 268, 269, 3, 10, 5, 0, 
269, 270, 5, 55, 0, 0, 270, 272, 1, 0, 0, 0, 271, 263, 1, 0, 0, 0, 271, 265, 1, 0, 0, 0, 271, 266, 1, 0, 0, 0, 271, 267, 1, 0, 0, 0, 272, 278, 1, 0, 0, 0, 273, 274, 10, 1, 0, 0, 274, 275, 5, 37, 0, 0, 275, 277, 3, 26, 13, 0, 276, 273, 1, 0, 0, 0, 277, 280, 1, 0, 0, 0, 278, 276, 1, 0, 0, 0, 278, 279, 1, 0, 0, 0, 279, 21, 1, 0, 0, 0, 280, 278, 1, 0, 0, 0, 281, 282, 3, 24, 12, 0, 282, 292, 5, 48, 0, 0, 283, 293, 5, 66, 0, 0, 284, 289, 3, 10, 5, 0, 285, 286, 5, 39, 0, 0, 286, 288, 3, 10, 5, 0, 287, 285, 1, 0, 0, 0, 288, 291, 1, 0, 0, 0, 289, 287, 1, 0, 0, 0, 289, 290, 1, 0, 0, 0, 290, 293, 1, 0, 0, 0, 291, 289, 1, 0, 0, 0, 292, 283, 1, 0, 0, 0, 292, 284, 1, 0, 0, 0, 292, 293, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 295, 5, 55, 0, 0, 295, 23, 1, 0, 0, 0, 296, 297, 3, 72, 36, 0, 297, 25, 1, 0, 0, 0, 298, 299, 3, 64, 32, 0, 299, 27, 1, 0, 0, 0, 300, 301, 5, 12, 0, 0, 301, 302, 3, 30, 15, 0, 302, 29, 1, 0, 0, 0, 303, 308, 3, 32, 16, 0, 304, 305, 5, 39, 0, 0, 305, 307, 3, 32, 16, 0, 306, 304, 1, 0, 0, 0, 307, 310, 1, 0, 0, 0, 308, 306, 1, 0, 0, 0, 308, 309, 1, 0, 0, 0, 309, 31, 1, 0, 0, 0, 310, 308, 1, 0, 0, 0, 311, 312, 3, 58, 29, 0, 312, 313, 5, 36, 0, 0, 313, 315, 1, 0, 0, 0, 314, 311, 1, 0, 0, 0, 314, 315, 1, 0, 0, 0, 315, 316, 1, 0, 0, 0, 316, 317, 3, 10, 5, 0, 317, 33, 1, 0, 0, 0, 318, 319, 5, 6, 0, 0, 319, 324, 3, 36, 18, 0, 320, 321, 5, 39, 0, 0, 321, 323, 3, 36, 18, 0, 322, 320, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324, 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 328, 1, 0, 0, 0, 326, 324, 1, 0, 0, 0, 327, 329, 3, 42, 21, 0, 328, 327, 1, 0, 0, 0, 328, 329, 1, 0, 0, 0, 329, 35, 1, 0, 0, 0, 330, 331, 3, 38, 19, 0, 331, 332, 5, 38, 0, 0, 332, 334, 1, 0, 0, 0, 333, 330, 1, 0, 0, 0, 333, 334, 1, 0, 0, 0, 334, 335, 1, 0, 0, 0, 335, 336, 3, 40, 20, 0, 336, 37, 1, 0, 0, 0, 337, 338, 5, 81, 0, 0, 338, 39, 1, 0, 0, 0, 339, 340, 7, 2, 0, 0, 340, 41, 1, 0, 0, 0, 341, 344, 3, 44, 22, 0, 342, 344, 3, 46, 23, 0, 343, 341, 1, 0, 0, 0, 343, 342, 1, 0, 0, 0, 344, 43, 1, 0, 0, 0, 345, 346, 5, 80, 0, 0, 346, 351, 5, 81, 0, 0, 347, 348, 5, 39, 0, 0, 348, 350, 5, 81, 0, 0, 349, 347, 1, 0, 0, 0, 350, 353, 1, 0, 0, 0, 351, 349, 1, 0, 0, 0, 351, 352, 1, 0, 0, 0, 352, 45, 1, 0, 0, 0, 353, 351, 1, 0, 0, 0, 354, 355, 5, 70, 0, 0, 355, 356, 3, 44, 22, 0, 356, 357, 5, 71, 0, 0, 357, 47, 1, 0, 0, 0, 358, 359, 5, 19, 0, 0, 359, 364, 3, 36, 18, 0, 360, 361, 5, 39, 0, 0, 361, 363, 3, 36, 18, 0, 362, 360, 1, 0, 0, 0, 363, 366, 1, 0, 0, 0, 364, 362, 1, 0, 0, 0, 364, 365, 1, 0, 0, 0, 365, 368, 1, 0, 0, 0, 366, 364, 1, 0, 0, 0, 367, 369, 3, 54, 27, 0, 368, 367, 1, 0, 0, 0, 368, 369, 1, 0, 0, 0, 369, 372, 1, 0, 0, 0, 370, 371, 5, 33, 0, 0, 371, 373, 3, 30, 15, 0, 372, 370, 1, 0, 0, 0, 372, 373, 1, 0, 0, 0, 373, 49, 1, 0, 0, 0, 374, 375, 5, 4, 0, 0, 375, 376, 3, 30, 15, 0, 376, 51, 1, 0, 0, 0, 377, 379, 5, 15, 0, 0, 378, 380, 3, 54, 27, 0, 379, 378, 1, 0, 0, 0, 379, 380, 1, 0, 0, 0, 380, 383, 1, 0, 0, 0, 381, 382, 5, 33, 0, 0, 382, 384, 3, 30, 15, 0, 383, 381, 1, 0, 0, 0, 383, 384, 1, 0, 0, 0, 384, 53, 1, 0, 0, 0, 385, 390, 3, 56, 28, 0, 386, 387, 5, 39, 0, 0, 387, 389, 3, 56, 28, 0, 388, 386, 1, 0, 0, 0, 389, 392, 1, 0, 0, 0, 390, 388, 1, 0, 0, 0, 390, 391, 1, 0, 0, 0, 391, 55, 1, 0, 0, 0, 392, 390, 1, 0, 0, 0, 393, 396, 3, 32, 16, 0, 394, 395, 5, 16, 0, 0, 395, 397, 3, 10, 5, 0, 396, 394, 1, 0, 0, 0, 396, 397, 1, 0, 0, 0, 397, 57, 1, 0, 0, 0, 398, 403, 3, 72, 36, 0, 399, 400, 5, 41, 0, 0, 400, 402, 3, 72, 36, 0, 401, 399, 1, 0, 0, 0, 402, 405, 1, 0, 0, 0, 403, 401, 1, 0, 0, 0, 403, 404, 1, 0, 0, 0, 404, 59, 1, 0, 0, 0, 405, 403, 
1, 0, 0, 0, 406, 411, 3, 66, 33, 0, 407, 408, 5, 41, 0, 0, 408, 410, 3, 66, 33, 0, 409, 407, 1, 0, 0, 0, 410, 413, 1, 0, 0, 0, 411, 409, 1, 0, 0, 0, 411, 412, 1, 0, 0, 0, 412, 61, 1, 0, 0, 0, 413, 411, 1, 0, 0, 0, 414, 419, 3, 60, 30, 0, 415, 416, 5, 39, 0, 0, 416, 418, 3, 60, 30, 0, 417, 415, 1, 0, 0, 0, 418, 421, 1, 0, 0, 0, 419, 417, 1, 0, 0, 0, 419, 420, 1, 0, 0, 0, 420, 63, 1, 0, 0, 0, 421, 419, 1, 0, 0, 0, 422, 423, 7, 3, 0, 0, 423, 65, 1, 0, 0, 0, 424, 428, 5, 85, 0, 0, 425, 426, 4, 33, 10, 0, 426, 428, 3, 70, 35, 0, 427, 424, 1, 0, 0, 0, 427, 425, 1, 0, 0, 0, 428, 67, 1, 0, 0, 0, 429, 472, 5, 50, 0, 0, 430, 431, 3, 104, 52, 0, 431, 432, 5, 72, 0, 0, 432, 472, 1, 0, 0, 0, 433, 472, 3, 102, 51, 0, 434, 472, 3, 104, 52, 0, 435, 472, 3, 98, 49, 0, 436, 472, 3, 70, 35, 0, 437, 472, 3, 106, 53, 0, 438, 439, 5, 70, 0, 0, 439, 444, 3, 100, 50, 0, 440, 441, 5, 39, 0, 0, 441, 443, 3, 100, 50, 0, 442, 440, 1, 0, 0, 0, 443, 446, 1, 0, 0, 0, 444, 442, 1, 0, 0, 0, 444, 445, 1, 0, 0, 0, 445, 447, 1, 0, 0, 0, 446, 444, 1, 0, 0, 0, 447, 448, 5, 71, 0, 0, 448, 472, 1, 0, 0, 0, 449, 450, 5, 70, 0, 0, 450, 455, 3, 98, 49, 0, 451, 452, 5, 39, 0, 0, 452, 454, 3, 98, 49, 0, 453, 451, 1, 0, 0, 0, 454, 457, 1, 0, 0, 0, 455, 453, 1, 0, 0, 0, 455, 456, 1, 0, 0, 0, 456, 458, 1, 0, 0, 0, 457, 455, 1, 0, 0, 0, 458, 459, 5, 71, 0, 0, 459, 472, 1, 0, 0, 0, 460, 461, 5, 70, 0, 0, 461, 466, 3, 106, 53, 0, 462, 463, 5, 39, 0, 0, 463, 465, 3, 106, 53, 0, 464, 462, 1, 0, 0, 0, 465, 468, 1, 0, 0, 0, 466, 464, 1, 0, 0, 0, 466, 467, 1, 0, 0, 0, 467, 469, 1, 0, 0, 0, 468, 466, 1, 0, 0, 0, 469, 470, 5, 71, 0, 0, 470, 472, 1, 0, 0, 0, 471, 429, 1, 0, 0, 0, 471, 430, 1, 0, 0, 0, 471, 433, 1, 0, 0, 0, 471, 434, 1, 0, 0, 0, 471, 435, 1, 0, 0, 0, 471, 436, 1, 0, 0, 0, 471, 437, 1, 0, 0, 0, 471, 438, 1, 0, 0, 0, 471, 449, 1, 0, 0, 0, 471, 460, 1, 0, 0, 0, 472, 69, 1, 0, 0, 0, 473, 476, 5, 53, 0, 0, 474, 476, 5, 69, 0, 0, 475, 473, 1, 0, 0, 0, 475, 474, 1, 0, 0, 0, 476, 71, 1, 0, 0, 0, 477, 481, 3, 64, 32, 0, 478, 479, 4, 36, 11, 0, 479, 481, 3, 70, 35, 0, 480, 477, 1, 0, 0, 0, 480, 478, 1, 0, 0, 0, 481, 73, 1, 0, 0, 0, 482, 483, 5, 9, 0, 0, 483, 484, 5, 31, 0, 0, 484, 75, 1, 0, 0, 0, 485, 486, 5, 14, 0, 0, 486, 491, 3, 78, 39, 0, 487, 488, 5, 39, 0, 0, 488, 490, 3, 78, 39, 0, 489, 487, 1, 0, 0, 0, 490, 493, 1, 0, 0, 0, 491, 489, 1, 0, 0, 0, 491, 492, 1, 0, 0, 0, 492, 77, 1, 0, 0, 0, 493, 491, 1, 0, 0, 0, 494, 496, 3, 10, 5, 0, 495, 497, 7, 4, 0, 0, 496, 495, 1, 0, 0, 0, 496, 497, 1, 0, 0, 0, 497, 500, 1, 0, 0, 0, 498, 499, 5, 51, 0, 0, 499, 501, 7, 5, 0, 0, 500, 498, 1, 0, 0, 0, 500, 501, 1, 0, 0, 0, 501, 79, 1, 0, 0, 0, 502, 503, 5, 8, 0, 0, 503, 504, 3, 62, 31, 0, 504, 81, 1, 0, 0, 0, 505, 506, 5, 2, 0, 0, 506, 507, 3, 62, 31, 0, 507, 83, 1, 0, 0, 0, 508, 509, 5, 11, 0, 0, 509, 514, 3, 86, 43, 0, 510, 511, 5, 39, 0, 0, 511, 513, 3, 86, 43, 0, 512, 510, 1, 0, 0, 0, 513, 516, 1, 0, 0, 0, 514, 512, 1, 0, 0, 0, 514, 515, 1, 0, 0, 0, 515, 85, 1, 0, 0, 0, 516, 514, 1, 0, 0, 0, 517, 518, 3, 60, 30, 0, 518, 519, 5, 89, 0, 0, 519, 520, 3, 60, 30, 0, 520, 87, 1, 0, 0, 0, 521, 522, 5, 1, 0, 0, 522, 523, 3, 20, 10, 0, 523, 525, 3, 106, 53, 0, 524, 526, 3, 94, 47, 0, 525, 524, 1, 0, 0, 0, 525, 526, 1, 0, 0, 0, 526, 89, 1, 0, 0, 0, 527, 528, 5, 7, 0, 0, 528, 529, 3, 20, 10, 0, 529, 530, 3, 106, 53, 0, 530, 91, 1, 0, 0, 0, 531, 532, 5, 10, 0, 0, 532, 533, 3, 58, 29, 0, 533, 93, 1, 0, 0, 0, 534, 539, 3, 96, 48, 0, 535, 536, 5, 39, 0, 0, 536, 538, 3, 96, 48, 0, 537, 535, 1, 0, 0, 0, 538, 541, 1, 0, 0, 0, 539, 537, 1, 0, 0, 0, 539, 540, 1, 0, 
0, 0, 540, 95, 1, 0, 0, 0, 541, 539, 1, 0, 0, 0, 542, 543, 3, 64, 32, 0, 543, 544, 5, 36, 0, 0, 544, 545, 3, 68, 34, 0, 545, 97, 1, 0, 0, 0, 546, 547, 7, 6, 0, 0, 547, 99, 1, 0, 0, 0, 548, 551, 3, 102, 51, 0, 549, 551, 3, 104, 52, 0, 550, 548, 1, 0, 0, 0, 550, 549, 1, 0, 0, 0, 551, 101, 1, 0, 0, 0, 552, 554, 7, 0, 0, 0, 553, 552, 1, 0, 0, 0, 553, 554, 1, 0, 0, 0, 554, 555, 1, 0, 0, 0, 555, 556, 5, 32, 0, 0, 556, 103, 1, 0, 0, 0, 557, 559, 7, 0, 0, 0, 558, 557, 1, 0, 0, 0, 558, 559, 1, 0, 0, 0, 559, 560, 1, 0, 0, 0, 560, 561, 5, 31, 0, 0, 561, 105, 1, 0, 0, 0, 562, 563, 5, 30, 0, 0, 563, 107, 1, 0, 0, 0, 564, 565, 7, 7, 0, 0, 565, 109, 1, 0, 0, 0, 566, 567, 5, 5, 0, 0, 567, 568, 3, 112, 56, 0, 568, 111, 1, 0, 0, 0, 569, 570, 5, 70, 0, 0, 570, 571, 3, 2, 1, 0, 571, 572, 5, 71, 0, 0, 572, 113, 1, 0, 0, 0, 573, 574, 5, 13, 0, 0, 574, 575, 5, 105, 0, 0, 575, 115, 1, 0, 0, 0, 576, 577, 5, 3, 0, 0, 577, 580, 5, 95, 0, 0, 578, 579, 5, 93, 0, 0, 579, 581, 3, 60, 30, 0, 580, 578, 1, 0, 0, 0, 580, 581, 1, 0, 0, 0, 581, 591, 1, 0, 0, 0, 582, 583, 5, 94, 0, 0, 583, 588, 3, 118, 59, 0, 584, 585, 5, 39, 0, 0, 585, 587, 3, 118, 59, 0, 586, 584, 1, 0, 0, 0, 587, 590, 1, 0, 0, 0, 588, 586, 1, 0, 0, 0, 588, 589, 1, 0, 0, 0, 589, 592, 1, 0, 0, 0, 590, 588, 1, 0, 0, 0, 591, 582, 1, 0, 0, 0, 591, 592, 1, 0, 0, 0, 592, 117, 1, 0, 0, 0, 593, 594, 3, 60, 30, 0, 594, 595, 5, 36, 0, 0, 595, 597, 1, 0, 0, 0, 596, 593, 1, 0, 0, 0, 596, 597, 1, 0, 0, 0, 597, 598, 1, 0, 0, 0, 598, 599, 3, 60, 30, 0, 599, 119, 1, 0, 0, 0, 600, 601, 5, 18, 0, 0, 601, 602, 3, 36, 18, 0, 602, 603, 5, 93, 0, 0, 603, 604, 3, 62, 31, 0, 604, 121, 1, 0, 0, 0, 605, 606, 5, 17, 0, 0, 606, 609, 3, 54, 27, 0, 607, 608, 5, 33, 0, 0, 608, 610, 3, 30, 15, 0, 609, 607, 1, 0, 0, 0, 609, 610, 1, 0, 0, 0, 610, 123, 1, 0, 0, 0, 611, 613, 7, 8, 0, 0, 612, 611, 1, 0, 0, 0, 612, 613, 1, 0, 0, 0, 613, 614, 1, 0, 0, 0, 614, 615, 5, 20, 0, 0, 615, 616, 3, 126, 63, 0, 616, 617, 3, 128, 64, 0, 617, 125, 1, 0, 0, 0, 618, 621, 3, 64, 32, 0, 619, 620, 5, 89, 0, 0, 620, 622, 3, 64, 32, 0, 621, 619, 1, 0, 0, 0, 621, 622, 1, 0, 0, 0, 622, 127, 1, 0, 0, 0, 623, 624, 5, 93, 0, 0, 624, 629, 3, 130, 65, 0, 625, 626, 5, 39, 0, 0, 626, 628, 3, 130, 65, 0, 627, 625, 1, 0, 0, 0, 628, 631, 1, 0, 0, 0, 629, 627, 1, 0, 0, 0, 629, 630, 1, 0, 0, 0, 630, 129, 1, 0, 0, 0, 631, 629, 1, 0, 0, 0, 632, 633, 3, 16, 8, 0, 633, 131, 1, 0, 0, 0, 61, 143, 152, 172, 184, 193, 201, 206, 214, 216, 221, 228, 233, 244, 250, 258, 260, 271, 278, 289, 292, 308, 314, 324, 328, 333, 343, 351, 364, 368, 372, 379, 383, 390, 396, 403, 411, 419, 427, 444, 455, 466, 471, 475, 480, 491, 496, 500, 514, 525, 539, 550, 553, 558, 580, 588, 591, 596, 609, 612, 621, 629] \ No newline at end of file diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.java index e36184b1f07da..e864eaff3edd7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.java @@ -8,26 +8,14 @@ * 2.0. 
 */
-import org.antlr.v4.runtime.FailedPredicateException;
-import org.antlr.v4.runtime.NoViableAltException;
-import org.antlr.v4.runtime.ParserRuleContext;
-import org.antlr.v4.runtime.RecognitionException;
-import org.antlr.v4.runtime.RuleContext;
-import org.antlr.v4.runtime.RuntimeMetaData;
-import org.antlr.v4.runtime.Token;
-import org.antlr.v4.runtime.TokenStream;
-import org.antlr.v4.runtime.Vocabulary;
-import org.antlr.v4.runtime.VocabularyImpl;
-import org.antlr.v4.runtime.atn.ATN;
-import org.antlr.v4.runtime.atn.ATNDeserializer;
-import org.antlr.v4.runtime.atn.ParserATNSimulator;
-import org.antlr.v4.runtime.atn.PredictionContextCache;
+import org.antlr.v4.runtime.atn.*;
 import org.antlr.v4.runtime.dfa.DFA;
-import org.antlr.v4.runtime.tree.ParseTreeListener;
-import org.antlr.v4.runtime.tree.ParseTreeVisitor;
-import org.antlr.v4.runtime.tree.TerminalNode;
-
+import org.antlr.v4.runtime.*;
+import org.antlr.v4.runtime.misc.*;
+import org.antlr.v4.runtime.tree.*;
 import java.util.List;
+import java.util.Iterator;
+import java.util.ArrayList;
 
 @SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast", "CheckReturnValue"})
 public class EsqlBaseParser extends ParserConfig {
@@ -37,113 +25,120 @@ public class EsqlBaseParser extends ParserConfig {
   protected static final PredictionContextCache _sharedContextCache =
     new PredictionContextCache();
   public static final int
-    DISSECT=1, DROP=2, ENRICH=3, EVAL=4, EXPLAIN=5, FROM=6, GROK=7, KEEP=8,
-    LIMIT=9, MV_EXPAND=10, RENAME=11, ROW=12, SHOW=13, SORT=14, STATS=15,
-    WHERE=16, DEV_INLINESTATS=17, DEV_LOOKUP=18, DEV_METRICS=19, UNKNOWN_CMD=20,
-    LINE_COMMENT=21, MULTILINE_COMMENT=22, WS=23, COLON=24, PIPE=25, QUOTED_STRING=26,
-    INTEGER_LITERAL=27, DECIMAL_LITERAL=28, BY=29, AND=30, ASC=31, ASSIGN=32,
-    CAST_OP=33, COMMA=34, DESC=35, DOT=36, FALSE=37, FIRST=38, IN=39, IS=40,
-    LAST=41, LIKE=42, LP=43, NOT=44, NULL=45, NULLS=46, OR=47, PARAM=48, RLIKE=49,
-    RP=50, TRUE=51, EQ=52, CIEQ=53, NEQ=54, LT=55, LTE=56, GT=57, GTE=58,
-    PLUS=59, MINUS=60, ASTERISK=61, SLASH=62, PERCENT=63, NAMED_OR_POSITIONAL_PARAM=64,
-    OPENING_BRACKET=65, CLOSING_BRACKET=66, UNQUOTED_IDENTIFIER=67, QUOTED_IDENTIFIER=68,
-    EXPR_LINE_COMMENT=69, EXPR_MULTILINE_COMMENT=70, EXPR_WS=71, EXPLAIN_WS=72,
-    EXPLAIN_LINE_COMMENT=73, EXPLAIN_MULTILINE_COMMENT=74, METADATA=75, UNQUOTED_SOURCE=76,
-    FROM_LINE_COMMENT=77, FROM_MULTILINE_COMMENT=78, FROM_WS=79, ID_PATTERN=80,
-    PROJECT_LINE_COMMENT=81, PROJECT_MULTILINE_COMMENT=82, PROJECT_WS=83,
-    AS=84, RENAME_LINE_COMMENT=85, RENAME_MULTILINE_COMMENT=86, RENAME_WS=87,
-    ON=88, WITH=89, ENRICH_POLICY_NAME=90, ENRICH_LINE_COMMENT=91, ENRICH_MULTILINE_COMMENT=92,
-    ENRICH_WS=93, ENRICH_FIELD_LINE_COMMENT=94, ENRICH_FIELD_MULTILINE_COMMENT=95,
-    ENRICH_FIELD_WS=96, MVEXPAND_LINE_COMMENT=97, MVEXPAND_MULTILINE_COMMENT=98,
-    MVEXPAND_WS=99, INFO=100, SHOW_LINE_COMMENT=101, SHOW_MULTILINE_COMMENT=102,
-    SHOW_WS=103, SETTING=104, SETTING_LINE_COMMENT=105, SETTTING_MULTILINE_COMMENT=106,
-    SETTING_WS=107, LOOKUP_LINE_COMMENT=108, LOOKUP_MULTILINE_COMMENT=109,
-    LOOKUP_WS=110, LOOKUP_FIELD_LINE_COMMENT=111, LOOKUP_FIELD_MULTILINE_COMMENT=112,
-    LOOKUP_FIELD_WS=113, METRICS_LINE_COMMENT=114, METRICS_MULTILINE_COMMENT=115,
-    METRICS_WS=116, CLOSING_METRICS_LINE_COMMENT=117, CLOSING_METRICS_MULTILINE_COMMENT=118,
-    CLOSING_METRICS_WS=119;
+    DISSECT=1, DROP=2, ENRICH=3, EVAL=4, EXPLAIN=5, FROM=6, GROK=7, KEEP=8,
+    LIMIT=9, MV_EXPAND=10, RENAME=11, ROW=12, SHOW=13, SORT=14, STATS=15,
+    WHERE=16, DEV_INLINESTATS=17, DEV_LOOKUP=18, DEV_METRICS=19, DEV_JOIN=20,
+    DEV_JOIN_FULL=21, DEV_JOIN_LEFT=22, DEV_JOIN_RIGHT=23, DEV_JOIN_LOOKUP=24,
+    UNKNOWN_CMD=25, LINE_COMMENT=26, MULTILINE_COMMENT=27, WS=28, PIPE=29,
+    QUOTED_STRING=30, INTEGER_LITERAL=31, DECIMAL_LITERAL=32, BY=33, AND=34,
+    ASC=35, ASSIGN=36, CAST_OP=37, COLON=38, COMMA=39, DESC=40, DOT=41, FALSE=42,
+    FIRST=43, IN=44, IS=45, LAST=46, LIKE=47, LP=48, NOT=49, NULL=50, NULLS=51,
+    OR=52, PARAM=53, RLIKE=54, RP=55, TRUE=56, EQ=57, CIEQ=58, NEQ=59, LT=60,
+    LTE=61, GT=62, GTE=63, PLUS=64, MINUS=65, ASTERISK=66, SLASH=67, PERCENT=68,
+    NAMED_OR_POSITIONAL_PARAM=69, OPENING_BRACKET=70, CLOSING_BRACKET=71,
+    UNQUOTED_IDENTIFIER=72, QUOTED_IDENTIFIER=73, EXPR_LINE_COMMENT=74, EXPR_MULTILINE_COMMENT=75,
+    EXPR_WS=76, EXPLAIN_WS=77, EXPLAIN_LINE_COMMENT=78, EXPLAIN_MULTILINE_COMMENT=79,
+    METADATA=80, UNQUOTED_SOURCE=81, FROM_LINE_COMMENT=82, FROM_MULTILINE_COMMENT=83,
+    FROM_WS=84, ID_PATTERN=85, PROJECT_LINE_COMMENT=86, PROJECT_MULTILINE_COMMENT=87,
+    PROJECT_WS=88, AS=89, RENAME_LINE_COMMENT=90, RENAME_MULTILINE_COMMENT=91,
+    RENAME_WS=92, ON=93, WITH=94, ENRICH_POLICY_NAME=95, ENRICH_LINE_COMMENT=96,
+    ENRICH_MULTILINE_COMMENT=97, ENRICH_WS=98, ENRICH_FIELD_LINE_COMMENT=99,
+    ENRICH_FIELD_MULTILINE_COMMENT=100, ENRICH_FIELD_WS=101, MVEXPAND_LINE_COMMENT=102,
+    MVEXPAND_MULTILINE_COMMENT=103, MVEXPAND_WS=104, INFO=105, SHOW_LINE_COMMENT=106,
+    SHOW_MULTILINE_COMMENT=107, SHOW_WS=108, SETTING=109, SETTING_LINE_COMMENT=110,
+    SETTTING_MULTILINE_COMMENT=111, SETTING_WS=112, LOOKUP_LINE_COMMENT=113,
+    LOOKUP_MULTILINE_COMMENT=114, LOOKUP_WS=115, LOOKUP_FIELD_LINE_COMMENT=116,
+    LOOKUP_FIELD_MULTILINE_COMMENT=117, LOOKUP_FIELD_WS=118, USING=119, JOIN_LINE_COMMENT=120,
+    JOIN_MULTILINE_COMMENT=121, JOIN_WS=122, METRICS_LINE_COMMENT=123, METRICS_MULTILINE_COMMENT=124,
+    METRICS_WS=125, CLOSING_METRICS_LINE_COMMENT=126, CLOSING_METRICS_MULTILINE_COMMENT=127,
+    CLOSING_METRICS_WS=128;
   public static final int
-    RULE_singleStatement = 0, RULE_query = 1, RULE_sourceCommand = 2, RULE_processingCommand = 3,
-    RULE_whereCommand = 4, RULE_booleanExpression = 5, RULE_regexBooleanExpression = 6,
-    RULE_matchBooleanExpression = 7, RULE_valueExpression = 8, RULE_operatorExpression = 9,
-    RULE_primaryExpression = 10, RULE_functionExpression = 11, RULE_functionName = 12,
-    RULE_dataType = 13, RULE_rowCommand = 14, RULE_fields = 15, RULE_field = 16,
-    RULE_fromCommand = 17, RULE_indexPattern = 18, RULE_clusterString = 19,
-    RULE_indexString = 20, RULE_metadata = 21, RULE_metadataOption = 22, RULE_deprecated_metadata = 23,
-    RULE_metricsCommand = 24, RULE_evalCommand = 25, RULE_statsCommand = 26,
-    RULE_aggFields = 27, RULE_aggField = 28, RULE_qualifiedName = 29, RULE_qualifiedNamePattern = 30,
-    RULE_qualifiedNamePatterns = 31, RULE_identifier = 32, RULE_identifierPattern = 33,
-    RULE_constant = 34, RULE_parameter = 35, RULE_identifierOrParameter = 36,
-    RULE_limitCommand = 37, RULE_sortCommand = 38, RULE_orderExpression = 39,
-    RULE_keepCommand = 40, RULE_dropCommand = 41, RULE_renameCommand = 42,
-    RULE_renameClause = 43, RULE_dissectCommand = 44, RULE_grokCommand = 45,
-    RULE_mvExpandCommand = 46, RULE_commandOptions = 47, RULE_commandOption = 48,
-    RULE_booleanValue = 49, RULE_numericValue = 50, RULE_decimalValue = 51,
-    RULE_integerValue = 52, RULE_string = 53, RULE_comparisonOperator = 54,
-    RULE_explainCommand = 55, RULE_subqueryExpression = 56, RULE_showCommand = 57,
-    RULE_enrichCommand = 58, RULE_enrichWithClause = 59, RULE_lookupCommand = 60,
-    RULE_inlinestatsCommand = 61;
+    RULE_singleStatement = 0, RULE_query = 1, RULE_sourceCommand = 2, RULE_processingCommand = 3,
+    RULE_whereCommand = 4, RULE_booleanExpression = 5, RULE_regexBooleanExpression = 6,
+    RULE_matchBooleanExpression = 7, RULE_valueExpression = 8, RULE_operatorExpression = 9,
+    RULE_primaryExpression = 10, RULE_functionExpression = 11, RULE_functionName = 12,
+    RULE_dataType = 13, RULE_rowCommand = 14, RULE_fields = 15, RULE_field = 16,
+    RULE_fromCommand = 17, RULE_indexPattern = 18, RULE_clusterString = 19,
+    RULE_indexString = 20, RULE_metadata = 21, RULE_metadataOption = 22, RULE_deprecated_metadata = 23,
+    RULE_metricsCommand = 24, RULE_evalCommand = 25, RULE_statsCommand = 26,
+    RULE_aggFields = 27, RULE_aggField = 28, RULE_qualifiedName = 29, RULE_qualifiedNamePattern = 30,
+    RULE_qualifiedNamePatterns = 31, RULE_identifier = 32, RULE_identifierPattern = 33,
+    RULE_constant = 34, RULE_parameter = 35, RULE_identifierOrParameter = 36,
+    RULE_limitCommand = 37, RULE_sortCommand = 38, RULE_orderExpression = 39,
+    RULE_keepCommand = 40, RULE_dropCommand = 41, RULE_renameCommand = 42,
+    RULE_renameClause = 43, RULE_dissectCommand = 44, RULE_grokCommand = 45,
+    RULE_mvExpandCommand = 46, RULE_commandOptions = 47, RULE_commandOption = 48,
+    RULE_booleanValue = 49, RULE_numericValue = 50, RULE_decimalValue = 51,
+    RULE_integerValue = 52, RULE_string = 53, RULE_comparisonOperator = 54,
+    RULE_explainCommand = 55, RULE_subqueryExpression = 56, RULE_showCommand = 57,
+    RULE_enrichCommand = 58, RULE_enrichWithClause = 59, RULE_lookupCommand = 60,
+    RULE_inlinestatsCommand = 61, RULE_joinCommand = 62, RULE_joinTarget = 63,
+    RULE_joinCondition = 64, RULE_joinPredicate = 65;
   private static String[] makeRuleNames() {
     return new String[] {
-      "singleStatement", "query", "sourceCommand", "processingCommand", "whereCommand",
-      "booleanExpression", "regexBooleanExpression", "matchBooleanExpression",
-      "valueExpression", "operatorExpression", "primaryExpression", "functionExpression",
-      "functionName", "dataType", "rowCommand", "fields", "field", "fromCommand",
-      "indexPattern", "clusterString", "indexString", "metadata", "metadataOption",
-      "deprecated_metadata", "metricsCommand", "evalCommand", "statsCommand",
-      "aggFields", "aggField", "qualifiedName", "qualifiedNamePattern", "qualifiedNamePatterns",
-      "identifier", "identifierPattern", "constant", "parameter", "identifierOrParameter",
-      "limitCommand", "sortCommand", "orderExpression", "keepCommand", "dropCommand",
-      "renameCommand", "renameClause", "dissectCommand", "grokCommand", "mvExpandCommand",
-      "commandOptions", "commandOption", "booleanValue", "numericValue", "decimalValue",
-      "integerValue", "string", "comparisonOperator", "explainCommand", "subqueryExpression",
-      "showCommand", "enrichCommand", "enrichWithClause", "lookupCommand",
-      "inlinestatsCommand"
+      "singleStatement", "query", "sourceCommand", "processingCommand", "whereCommand",
+      "booleanExpression", "regexBooleanExpression", "matchBooleanExpression",
+      "valueExpression", "operatorExpression", "primaryExpression", "functionExpression",
+      "functionName", "dataType", "rowCommand", "fields", "field", "fromCommand",
+      "indexPattern", "clusterString", "indexString", "metadata", "metadataOption",
+      "deprecated_metadata", "metricsCommand", "evalCommand", "statsCommand",
+      "aggFields", "aggField", "qualifiedName", "qualifiedNamePattern", "qualifiedNamePatterns",
+      "identifier", "identifierPattern", "constant", "parameter", "identifierOrParameter",
+      "limitCommand", "sortCommand", "orderExpression", "keepCommand", "dropCommand",
+      "renameCommand", "renameClause", "dissectCommand", "grokCommand", "mvExpandCommand",
+      "commandOptions", "commandOption", "booleanValue", "numericValue", "decimalValue",
+      "integerValue", "string", "comparisonOperator", "explainCommand", "subqueryExpression",
+      "showCommand", "enrichCommand", "enrichWithClause", "lookupCommand",
+      "inlinestatsCommand", "joinCommand", "joinTarget", "joinCondition", "joinPredicate"
     };
   }
   public static final String[] ruleNames = makeRuleNames();
 
   private static String[] makeLiteralNames() {
     return new String[] {
-      null, "'dissect'", "'drop'", "'enrich'", "'eval'", "'explain'", "'from'",
-      "'grok'", "'keep'", "'limit'", "'mv_expand'", "'rename'", "'row'", "'show'",
-      "'sort'", "'stats'", "'where'", null, null, null, null, null, null, null,
-      "':'", "'|'", null, null, null, "'by'", "'and'", "'asc'", "'='", "'::'",
-      "','", "'desc'", "'.'", "'false'", "'first'", "'in'", "'is'", "'last'",
-      "'like'", "'('", "'not'", "'null'", "'nulls'", "'or'", "'?'", "'rlike'",
-      "')'", "'true'", "'=='", "'=~'", "'!='", "'<'", "'<='", "'>'", "'>='",
-      "'+'", "'-'", "'*'", "'/'", "'%'", null, null, "']'", null, null, null,
-      null, null, null, null, null, "'metadata'", null, null, null, null, null,
-      null, null, null, "'as'", null, null, null, "'on'", "'with'", null, null,
-      null, null, null, null, null, null, null, null, "'info'"
+      null, "'dissect'", "'drop'", "'enrich'", "'eval'", "'explain'", "'from'",
+      "'grok'", "'keep'", "'limit'", "'mv_expand'", "'rename'", "'row'", "'show'",
+      "'sort'", "'stats'", "'where'", null, null, null, null, null, null, null,
+      null, null, null, null, null, "'|'", null, null, null, "'by'", "'and'",
+      "'asc'", "'='", "'::'", "':'", "','", "'desc'", "'.'", "'false'", "'first'",
+      "'in'", "'is'", "'last'", "'like'", "'('", "'not'", "'null'", "'nulls'",
+      "'or'", "'?'", "'rlike'", "')'", "'true'", "'=='", "'=~'", "'!='", "'<'",
+      "'<='", "'>'", "'>='", "'+'", "'-'", "'*'", "'/'", "'%'", null, null,
+      "']'", null, null, null, null, null, null, null, null, "'metadata'",
+      null, null, null, null, null, null, null, null, "'as'", null, null, null,
+      "'on'", "'with'", null, null, null, null, null, null, null, null, null,
+      null, "'info'", null, null, null, null, null, null, null, null, null,
+      null, null, null, null, "'USING'"
     };
   }
   private static final String[] _LITERAL_NAMES = makeLiteralNames();
   private static String[] makeSymbolicNames() {
     return new String[] {
-      null, "DISSECT", "DROP", "ENRICH", "EVAL", "EXPLAIN", "FROM", "GROK",
-      "KEEP", "LIMIT", "MV_EXPAND", "RENAME", "ROW", "SHOW", "SORT", "STATS",
-      "WHERE", "DEV_INLINESTATS", "DEV_LOOKUP", "DEV_METRICS", "UNKNOWN_CMD",
-      "LINE_COMMENT", "MULTILINE_COMMENT", "WS", "COLON", "PIPE", "QUOTED_STRING",
-      "INTEGER_LITERAL", "DECIMAL_LITERAL", "BY", "AND", "ASC", "ASSIGN", "CAST_OP",
-      "COMMA", "DESC", "DOT", "FALSE", "FIRST", "IN", "IS", "LAST", "LIKE",
-      "LP", "NOT", "NULL", "NULLS", "OR", "PARAM", "RLIKE", "RP", "TRUE", "EQ",
-      "CIEQ", "NEQ", "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK",
-      "SLASH", "PERCENT", "NAMED_OR_POSITIONAL_PARAM", "OPENING_BRACKET", "CLOSING_BRACKET",
-      "UNQUOTED_IDENTIFIER", "QUOTED_IDENTIFIER", "EXPR_LINE_COMMENT", "EXPR_MULTILINE_COMMENT",
-      "EXPR_WS", "EXPLAIN_WS", "EXPLAIN_LINE_COMMENT", "EXPLAIN_MULTILINE_COMMENT",
-      "METADATA", "UNQUOTED_SOURCE", "FROM_LINE_COMMENT", "FROM_MULTILINE_COMMENT",
-      "FROM_WS", "ID_PATTERN", "PROJECT_LINE_COMMENT", "PROJECT_MULTILINE_COMMENT",
-      "PROJECT_WS", "AS", "RENAME_LINE_COMMENT", "RENAME_MULTILINE_COMMENT",
-      "RENAME_WS", "ON", "WITH", "ENRICH_POLICY_NAME", "ENRICH_LINE_COMMENT",
-      "ENRICH_MULTILINE_COMMENT", "ENRICH_WS", "ENRICH_FIELD_LINE_COMMENT",
-      "ENRICH_FIELD_MULTILINE_COMMENT", "ENRICH_FIELD_WS", "MVEXPAND_LINE_COMMENT",
-      "MVEXPAND_MULTILINE_COMMENT", "MVEXPAND_WS", "INFO", "SHOW_LINE_COMMENT",
-      "SHOW_MULTILINE_COMMENT", "SHOW_WS", "SETTING", "SETTING_LINE_COMMENT",
-      "SETTTING_MULTILINE_COMMENT", "SETTING_WS", "LOOKUP_LINE_COMMENT", "LOOKUP_MULTILINE_COMMENT",
-      "LOOKUP_WS", "LOOKUP_FIELD_LINE_COMMENT", "LOOKUP_FIELD_MULTILINE_COMMENT",
-      "LOOKUP_FIELD_WS", "METRICS_LINE_COMMENT", "METRICS_MULTILINE_COMMENT",
-      "METRICS_WS", "CLOSING_METRICS_LINE_COMMENT", "CLOSING_METRICS_MULTILINE_COMMENT",
+      null, "DISSECT", "DROP", "ENRICH", "EVAL", "EXPLAIN", "FROM", "GROK",
+      "KEEP", "LIMIT", "MV_EXPAND", "RENAME", "ROW", "SHOW", "SORT", "STATS",
+      "WHERE", "DEV_INLINESTATS", "DEV_LOOKUP", "DEV_METRICS", "DEV_JOIN",
+      "DEV_JOIN_FULL", "DEV_JOIN_LEFT", "DEV_JOIN_RIGHT", "DEV_JOIN_LOOKUP",
+      "UNKNOWN_CMD", "LINE_COMMENT", "MULTILINE_COMMENT", "WS", "PIPE", "QUOTED_STRING",
+      "INTEGER_LITERAL", "DECIMAL_LITERAL", "BY", "AND", "ASC", "ASSIGN", "CAST_OP",
+      "COLON", "COMMA", "DESC", "DOT", "FALSE", "FIRST", "IN", "IS", "LAST",
+      "LIKE", "LP", "NOT", "NULL", "NULLS", "OR", "PARAM", "RLIKE", "RP", "TRUE",
+      "EQ", "CIEQ", "NEQ", "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK",
+      "SLASH", "PERCENT", "NAMED_OR_POSITIONAL_PARAM", "OPENING_BRACKET", "CLOSING_BRACKET",
+      "UNQUOTED_IDENTIFIER", "QUOTED_IDENTIFIER", "EXPR_LINE_COMMENT", "EXPR_MULTILINE_COMMENT",
+      "EXPR_WS", "EXPLAIN_WS", "EXPLAIN_LINE_COMMENT", "EXPLAIN_MULTILINE_COMMENT",
+      "METADATA", "UNQUOTED_SOURCE", "FROM_LINE_COMMENT", "FROM_MULTILINE_COMMENT",
+      "FROM_WS", "ID_PATTERN", "PROJECT_LINE_COMMENT", "PROJECT_MULTILINE_COMMENT",
+      "PROJECT_WS", "AS", "RENAME_LINE_COMMENT", "RENAME_MULTILINE_COMMENT",
+      "RENAME_WS", "ON", "WITH", "ENRICH_POLICY_NAME", "ENRICH_LINE_COMMENT",
+      "ENRICH_MULTILINE_COMMENT", "ENRICH_WS", "ENRICH_FIELD_LINE_COMMENT",
+      "ENRICH_FIELD_MULTILINE_COMMENT", "ENRICH_FIELD_WS", "MVEXPAND_LINE_COMMENT",
+      "MVEXPAND_MULTILINE_COMMENT", "MVEXPAND_WS", "INFO", "SHOW_LINE_COMMENT",
+      "SHOW_MULTILINE_COMMENT", "SHOW_WS", "SETTING", "SETTING_LINE_COMMENT",
+      "SETTTING_MULTILINE_COMMENT", "SETTING_WS", "LOOKUP_LINE_COMMENT", "LOOKUP_MULTILINE_COMMENT",
+      "LOOKUP_WS", "LOOKUP_FIELD_LINE_COMMENT", "LOOKUP_FIELD_MULTILINE_COMMENT",
+      "LOOKUP_FIELD_WS", "USING", "JOIN_LINE_COMMENT", "JOIN_MULTILINE_COMMENT",
+      "JOIN_WS", "METRICS_LINE_COMMENT", "METRICS_MULTILINE_COMMENT", "METRICS_WS",
+      "CLOSING_METRICS_LINE_COMMENT", "CLOSING_METRICS_MULTILINE_COMMENT",
       "CLOSING_METRICS_WS"
     };
   }
@@ -231,9 +226,9 @@ public final SingleStatementContext singleStatement() throws RecognitionExceptio
     try {
       enterOuterAlt(_localctx, 1);
       {
-      setState(124);
+      setState(132);
       query(0);
-      setState(125);
+      setState(133);
       match(EOF);
       }
     }
@@ -255,7 +250,7 @@ public QueryContext(ParserRuleContext parent, int invokingState) {
       super(parent, invokingState);
     }
     @Override public int getRuleIndex() { return RULE_query; }
- 
+    @SuppressWarnings("this-escape")
     public QueryContext() { }
     public void copyFrom(QueryContext ctx) {
@@ -329,11 +324,11 @@ private QueryContext query(int _p) throws RecognitionException {
       _ctx = _localctx;
       _prevctx = _localctx;
-      setState(128);
+      setState(136);
      sourceCommand();
      }
      _ctx.stop = _input.LT(-1);
-      setState(135);
+      setState(143);
      _errHandler.sync(this);
      _alt = getInterpreter().adaptivePredict(_input,0,_ctx);
      while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
@@ -344,16 +339,16 @@ private QueryContext query(int _p) throws RecognitionException {
          {
          _localctx = new CompositeQueryContext(new QueryContext(_parentctx, _parentState));
          pushNewRecursionContext(_localctx, _startState, RULE_query);
-          setState(130);
+          setState(138);
          if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)");
-          setState(131);
+          setState(139);
          match(PIPE);
-          setState(132);
+          setState(140);
          processingCommand();
          }
-          } 
+          }
        }
-        setState(137);
+        setState(145);
        _errHandler.sync(this);
        _alt = getInterpreter().adaptivePredict(_input,0,_ctx);
      }
@@ -411,43 +406,43 @@ public final SourceCommandContext sourceCommand() throws RecognitionException {
    SourceCommandContext _localctx = new SourceCommandContext(_ctx, getState());
    enterRule(_localctx, 4, RULE_sourceCommand);
    try {
-      setState(144);
+      setState(152);
      _errHandler.sync(this);
      switch ( getInterpreter().adaptivePredict(_input,1,_ctx) ) {
      case 1:
        enterOuterAlt(_localctx, 1);
        {
-        setState(138);
+        setState(146);
        explainCommand();
        }
        break;
      case 2:
        enterOuterAlt(_localctx, 2);
        {
-        setState(139);
+        setState(147);
        fromCommand();
        }
        break;
      case 3:
        enterOuterAlt(_localctx, 3);
        {
-        setState(140);
+        setState(148);
        rowCommand();
        }
        break;
      case 4:
        enterOuterAlt(_localctx, 4);
        {
-        setState(141);
+        setState(149);
        showCommand();
        }
        break;
      case 5:
        enterOuterAlt(_localctx, 5);
        {
-        setState(142);
+        setState(150);
        if (!(this.isDevVersion())) throw new FailedPredicateException(this, "this.isDevVersion()");
-        setState(143);
+        setState(151);
        metricsCommand();
        }
        break;
@@ -508,6 +503,9 @@ public InlinestatsCommandContext inlinestatsCommand() {
    public LookupCommandContext lookupCommand() {
      return getRuleContext(LookupCommandContext.class,0);
    }
+    public JoinCommandContext joinCommand() {
+      return getRuleContext(JoinCommandContext.class,0);
+    }
    @SuppressWarnings("this-escape")
    public ProcessingCommandContext(ParserRuleContext parent, int invokingState) {
      super(parent, invokingState);
@@ -532,111 +530,120 @@ public final ProcessingCommandContext processingCommand() throws RecognitionExce
    ProcessingCommandContext _localctx = new ProcessingCommandContext(_ctx, getState());
    enterRule(_localctx, 6, RULE_processingCommand);
    try {
-      setState(162);
+      setState(172);
      _errHandler.sync(this);
      switch ( getInterpreter().adaptivePredict(_input,2,_ctx) ) {
      case 1:
        enterOuterAlt(_localctx, 1);
        {
-        setState(146);
+        setState(154);
        evalCommand();
        }
        break;
      case 2:
        enterOuterAlt(_localctx, 2);
        {
-        setState(147);
+        setState(155);
        whereCommand();
        }
        break;
      case 3:
        enterOuterAlt(_localctx, 3);
        {
-        setState(148);
+        setState(156);
        keepCommand();
        }
        break;
      case 4:
        enterOuterAlt(_localctx, 4);
        {
-        setState(149);
+        setState(157);
        limitCommand();
        }
        break;
      case 5:
        enterOuterAlt(_localctx, 5);
        {
-        setState(150);
+        setState(158);
        statsCommand();
        }
        break;
      case 6:
        enterOuterAlt(_localctx, 6);
        {
-        setState(151);
+        setState(159);
        sortCommand();
        }
        break;
      case 7:
        enterOuterAlt(_localctx, 7);
        {
-        setState(152);
+        setState(160);
        dropCommand();
        }
        break;
      case 8:
        enterOuterAlt(_localctx, 8);
        {
-        setState(153);
+        setState(161);
        renameCommand();
        }
        break;
      case 9:
        enterOuterAlt(_localctx, 9);
        {
-        setState(154);
+        setState(162);
        dissectCommand();
        }
        break;
      case 10:
        enterOuterAlt(_localctx, 10);
        {
-        setState(155);
+        setState(163);
        grokCommand();
        }
        break;
      case 11:
        enterOuterAlt(_localctx, 11);
        {
-        setState(156);
+        setState(164);
        enrichCommand();
        }
        break;
      case 12:
        enterOuterAlt(_localctx, 12);
        {
-        setState(157);
+        setState(165);
        mvExpandCommand();
        }
        break;
      case 13:
        enterOuterAlt(_localctx, 13);
        {
-        setState(158);
+        setState(166);
        if (!(this.isDevVersion())) throw new FailedPredicateException(this, "this.isDevVersion()");
-        setState(159);
+        setState(167);
        inlinestatsCommand();
        }
        break;
      case 14:
        enterOuterAlt(_localctx, 14);
        {
-        setState(160);
+        setState(168);
        if (!(this.isDevVersion())) throw new FailedPredicateException(this, "this.isDevVersion()");
-        setState(161);
+        setState(169);
        lookupCommand();
        }
        break;
+      case 15:
+        enterOuterAlt(_localctx, 15);
+        {
+        setState(170);
+        if (!(this.isDevVersion())) throw new FailedPredicateException(this, "this.isDevVersion()");
+        setState(171);
+        joinCommand();
+        }
+        break;
      }
    }
    catch (RecognitionException re) {
@@ -682,9 +689,9 @@ public final WhereCommandContext whereCommand() throws RecognitionException {
    try {
      enterOuterAlt(_localctx, 1);
      {
-      setState(164);
+      setState(174);
      match(WHERE);
-      setState(165);
+      setState(175);
      booleanExpression(0);
      }
    }
@@ -706,7 +713,7 @@ public BooleanExpressionContext(ParserRuleContext parent, int invokingState) {
      super(parent, invokingState);
    }
    @Override public int getRuleIndex() { return RULE_booleanExpression; }
- 
+    @SuppressWarnings("this-escape")
    public BooleanExpressionContext() { }
    public void copyFrom(BooleanExpressionContext ctx) {
@@ -900,7 +907,7 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc
      int _alt;
      enterOuterAlt(_localctx, 1);
      {
-      setState(197);
+      setState(206);
      _errHandler.sync(this);
      switch ( getInterpreter().adaptivePredict(_input,6,_ctx) ) {
      case 1:
@@ -909,9 +916,9 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc
        _ctx = _localctx;
        _prevctx = _localctx;
-        setState(168);
+        setState(178);
        match(NOT);
-        setState(169);
+        setState(179);
        booleanExpression(8);
        }
        break;
@@ -920,7 +927,7 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc
        _localctx = new BooleanDefaultContext(_localctx);
        _ctx = _localctx;
        _prevctx = _localctx;
-        setState(170);
+        setState(180);
        valueExpression();
        }
        break;
@@ -929,7 +936,7 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc
        _localctx = new RegexExpressionContext(_localctx);
        _ctx = _localctx;
        _prevctx = _localctx;
-        setState(171);
+        setState(181);
        regexBooleanExpression();
        }
        break;
@@ -938,41 +945,41 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc
        _localctx = new LogicalInContext(_localctx);
        _ctx = _localctx;
        _prevctx = _localctx;
-        setState(172);
+        setState(182);
        valueExpression();
-        setState(174);
+        setState(184);
        _errHandler.sync(this);
        _la = _input.LA(1);
        if (_la==NOT) {
          {
-          setState(173);
+          setState(183);
          match(NOT);
          }
        }
-        setState(176);
+        setState(186);
        match(IN);
-        setState(177);
+        setState(187);
        match(LP);
-        setState(178);
+        setState(188);
        valueExpression();
-        setState(183);
+        setState(193);
        _errHandler.sync(this);
        _la = _input.LA(1);
        while (_la==COMMA) {
          {
          {
-          setState(179);
+          setState(189);
          match(COMMA);
-          setState(180);
+          setState(190);
          valueExpression();
          }
          }
-          setState(185);
+          setState(195);
          _errHandler.sync(this);
          _la = _input.LA(1);
        }
-        setState(186);
+        setState(196);
        match(RP);
        }
        break;
@@ -981,21 +988,21 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc
        _localctx = new IsNullContext(_localctx);
        _ctx = _localctx;
        _prevctx = _localctx;
-        setState(188);
+        setState(198);
        valueExpression();
-        setState(189);
+        setState(199);
        match(IS);
-        setState(191);
+        setState(201);
        _errHandler.sync(this);
        _la = _input.LA(1);
        if (_la==NOT) {
          {
-          setState(190);
+          setState(200);
          match(NOT);
          }
        }
-        setState(193);
+        setState(203);
        match(NULL);
        }
        break;
@@ -1004,15 +1011,13 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc
        _localctx = new MatchExpressionContext(_localctx);
        _ctx = _localctx;
        _prevctx = _localctx;
-        setState(195);
-        if (!(this.isDevVersion())) throw new FailedPredicateException(this, "this.isDevVersion()");
-        setState(196);
+        setState(205);
        matchBooleanExpression();
        }
        break;
      }
      _ctx.stop = _input.LT(-1);
-      setState(207);
+      setState(216);
      _errHandler.sync(this);
      _alt = getInterpreter().adaptivePredict(_input,8,_ctx);
      while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
@@ -1020,7 +1025,7 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc
          if ( _parseListeners!=null ) triggerExitRuleEvent();
          _prevctx = _localctx;
          {
-          setState(205);
+          setState(214);
          _errHandler.sync(this);
          switch ( getInterpreter().adaptivePredict(_input,7,_ctx) ) {
          case 1:
@@ -1028,11 +1033,11 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc
            _localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState));
            ((LogicalBinaryContext)_localctx).left = _prevctx;
            pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression);
-            setState(199);
+            setState(208);
            if (!(precpred(_ctx, 5))) throw new FailedPredicateException(this, "precpred(_ctx, 5)");
-            setState(200);
+            setState(209);
            ((LogicalBinaryContext)_localctx).operator = match(AND);
-            setState(201);
+            setState(210);
            ((LogicalBinaryContext)_localctx).right = booleanExpression(6);
            }
            break;
@@ -1041,18 +1046,18 @@ private BooleanExpressionContext booleanExpression(int _p) throws RecognitionExc
            _localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState));
            ((LogicalBinaryContext)_localctx).left = _prevctx;
            pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression);
-            setState(202);
+            setState(211);
            if (!(precpred(_ctx, 4))) throw new FailedPredicateException(this, "precpred(_ctx, 4)");
-            setState(203);
+            setState(212);
            ((LogicalBinaryContext)_localctx).operator = match(OR);
-            setState(204);
+            setState(213);
            ((LogicalBinaryContext)_localctx).right = booleanExpression(5);
            }
            break;
          }
-          } 
+          }
        }
-        setState(209);
+        setState(218);
        _errHandler.sync(this);
        _alt = getInterpreter().adaptivePredict(_input,8,_ctx);
      }
@@ -1107,48 +1112,48 @@ public final RegexBooleanExpressionContext regexBooleanExpression() throws Recog
    enterRule(_localctx, 12, RULE_regexBooleanExpression);
    int _la;
    try {
-      setState(224);
+      setState(233);
      _errHandler.sync(this);
      switch ( getInterpreter().adaptivePredict(_input,11,_ctx) ) {
      case 1:
        enterOuterAlt(_localctx, 1);
        {
-        setState(210);
+        setState(219);
        valueExpression();
-        setState(212);
+        setState(221);
        _errHandler.sync(this);
        _la = _input.LA(1);
        if (_la==NOT) {
          {
-          setState(211);
+          setState(220);
          match(NOT);
          }
        }
-        setState(214);
+        setState(223);
        ((RegexBooleanExpressionContext)_localctx).kind = match(LIKE);
-        setState(215);
+        setState(224);
        ((RegexBooleanExpressionContext)_localctx).pattern = string();
        }
        break;
      case 2:
        enterOuterAlt(_localctx, 2);
        {
-        setState(217);
+        setState(226);
        valueExpression();
-        setState(219);
+        setState(228);
        _errHandler.sync(this);
        _la = _input.LA(1);
        if (_la==NOT) {
          {
-          setState(218);
+          setState(227);
          match(NOT);
          }
        }
-        setState(221);
+        setState(230);
        ((RegexBooleanExpressionContext)_localctx).kind = match(RLIKE);
-        setState(222);
+        setState(231);
        ((RegexBooleanExpressionContext)_localctx).pattern = string();
        }
        break;
@@ -1202,11 +1207,11 @@ public final MatchBooleanExpressionContext matchBooleanExpression() throws Recog
    try {
      enterOuterAlt(_localctx, 1);
      {
-      setState(226);
+      setState(235);
      ((MatchBooleanExpressionContext)_localctx).fieldExp = qualifiedName();
-      setState(227);
+      setState(236);
      match(COLON);
-      setState(228);
+      setState(237);
      ((MatchBooleanExpressionContext)_localctx).queryString = constant();
      }
    }
@@ -1228,7 +1233,7 @@ public ValueExpressionContext(ParserRuleContext parent, int invokingState) {
      super(parent, invokingState);
    }
    @Override public int getRuleIndex() { return RULE_valueExpression; }
- 
+    @SuppressWarnings("this-escape")
    public ValueExpressionContext() { }
    public void copyFrom(ValueExpressionContext ctx) {
@@ -1290,14 +1295,14 @@ public final ValueExpressionContext valueExpression() throws RecognitionExceptio
    ValueExpressionContext _localctx = new ValueExpressionContext(_ctx, getState());
    enterRule(_localctx, 16, RULE_valueExpression);
    try {
-      setState(235);
+      setState(244);
      _errHandler.sync(this);
      switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) {
      case 1:
        _localctx = new ValueExpressionDefaultContext(_localctx);
        enterOuterAlt(_localctx, 1);
        {
-        setState(230);
+        setState(239);
        operatorExpression(0);
        }
        break;
@@ -1305,11 +1310,11 @@ public final ValueExpressionContext valueExpression() throws RecognitionExceptio
        _localctx = new ComparisonContext(_localctx);
        enterOuterAlt(_localctx, 2);
        {
-        setState(231);
+        setState(240);
        ((ComparisonContext)_localctx).left = operatorExpression(0);
-        setState(232);
+        setState(241);
        comparisonOperator();
-        setState(233);
+        setState(242);
        ((ComparisonContext)_localctx).right = operatorExpression(0);
        }
        break;
@@ -1333,7 +1338,7 @@ public OperatorExpressionContext(ParserRuleContext parent, int invokingState) {
      super(parent, invokingState);
    }
    @Override public int getRuleIndex() { return RULE_operatorExpression; }
- 
+    @SuppressWarnings("this-escape")
    public OperatorExpressionContext() { }
    public void copyFrom(OperatorExpressionContext ctx) {
@@ -1434,7 +1439,7 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE
      int _alt;
      enterOuterAlt(_localctx, 1);
      {
-      setState(241);
+      setState(250);
      _errHandler.sync(this);
      switch ( getInterpreter().adaptivePredict(_input,13,_ctx) ) {
      case 1:
@@ -1443,7 +1448,7 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE
        _ctx = _localctx;
        _prevctx = _localctx;
-        setState(238);
+        setState(247);
        primaryExpression(0);
        }
        break;
@@ -1452,7 +1457,7 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE
        _localctx = new ArithmeticUnaryContext(_localctx);
        _ctx = _localctx;
        _prevctx = _localctx;
-        setState(239);
+        setState(248);
        ((ArithmeticUnaryContext)_localctx).operator = _input.LT(1);
        _la = _input.LA(1);
        if ( !(_la==PLUS || _la==MINUS) ) {
@@ -1463,13 +1468,13 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE
          _errHandler.reportMatch(this);
          consume();
        }
-        setState(240);
+        setState(249);
        operatorExpression(3);
        }
        break;
      }
      _ctx.stop = _input.LT(-1);
-      setState(251);
+      setState(260);
      _errHandler.sync(this);
      _alt = getInterpreter().adaptivePredict(_input,15,_ctx);
      while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
@@ -1477,7 +1482,7 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE
          if ( _parseListeners!=null ) triggerExitRuleEvent();
          _prevctx = _localctx;
          {
-          setState(249);
+          setState(258);
          _errHandler.sync(this);
          switch ( getInterpreter().adaptivePredict(_input,14,_ctx) ) {
          case 1:
@@ -1485,12 +1490,12 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE
            _localctx = new ArithmeticBinaryContext(new OperatorExpressionContext(_parentctx, _parentState));
            ((ArithmeticBinaryContext)_localctx).left = _prevctx;
            pushNewRecursionContext(_localctx, _startState, RULE_operatorExpression);
-            setState(243);
+            setState(252);
            if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)");
-            setState(244);
+            setState(253);
            ((ArithmeticBinaryContext)_localctx).operator = _input.LT(1);
            _la = _input.LA(1);
-            if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & -2305843009213693952L) != 0)) ) {
+            if ( !(((((_la - 66)) & ~0x3f) == 0 && ((1L << (_la - 66)) & 7L) != 0)) ) {
              ((ArithmeticBinaryContext)_localctx).operator = (Token)_errHandler.recoverInline(this);
            }
            else {
@@ -1498,7 +1503,7 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE
              _errHandler.reportMatch(this);
              consume();
            }
-            setState(245);
+            setState(254);
            ((ArithmeticBinaryContext)_localctx).right = operatorExpression(3);
            }
            break;
@@ -1507,9 +1512,9 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE
            _localctx = new ArithmeticBinaryContext(new OperatorExpressionContext(_parentctx, _parentState));
            ((ArithmeticBinaryContext)_localctx).left = _prevctx;
            pushNewRecursionContext(_localctx, _startState, RULE_operatorExpression);
-            setState(246);
+            setState(255);
            if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)");
-            setState(247);
+            setState(256);
            ((ArithmeticBinaryContext)_localctx).operator = _input.LT(1);
            _la = _input.LA(1);
            if ( !(_la==PLUS || _la==MINUS) ) {
@@ -1520,14 +1525,14 @@ private OperatorExpressionContext operatorExpression(int _p) throws RecognitionE
              _errHandler.reportMatch(this);
              consume();
            }
-            setState(248);
+            setState(257);
            ((ArithmeticBinaryContext)_localctx).right = operatorExpression(2);
            }
            break;
          }
-          } 
+          }
        }
-        setState(253);
+        setState(262);
        _errHandler.sync(this);
        _alt = getInterpreter().adaptivePredict(_input,15,_ctx);
      }
@@ -1551,7 +1556,7 @@ public PrimaryExpressionContext(ParserRuleContext parent, int invokingState) {
      super(parent, invokingState);
    }
    @Override public int getRuleIndex() { return RULE_primaryExpression; }
- 
+    @SuppressWarnings("this-escape")
    public PrimaryExpressionContext() { }
    public void copyFrom(PrimaryExpressionContext ctx) {
@@ -1685,7 +1690,7 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc
      int _alt;
      enterOuterAlt(_localctx, 1);
      {
-      setState(262);
+      setState(271);
      _errHandler.sync(this);
      switch ( getInterpreter().adaptivePredict(_input,16,_ctx) ) {
      case 1:
@@ -1694,7 +1699,7 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc
        _ctx = _localctx;
        _prevctx = _localctx;
-        setState(255);
+        setState(264);
        constant();
        }
        break;
@@ -1703,7 +1708,7 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc
        _localctx = new DereferenceContext(_localctx);
        _ctx = _localctx;
        _prevctx = _localctx;
-        setState(256);
+        setState(265);
        qualifiedName();
        }
        break;
@@ -1712,7 +1717,7 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc
        _localctx = new FunctionContext(_localctx);
        _ctx = _localctx;
        _prevctx = _localctx;
-        setState(257);
+        setState(266);
        functionExpression();
        }
        break;
@@ -1721,17 +1726,17 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc
        _localctx = new ParenthesizedExpressionContext(_localctx);
        _ctx = _localctx;
        _prevctx = _localctx;
-        setState(258);
+        setState(267);
        match(LP);
-        setState(259);
+        setState(268);
        booleanExpression(0);
-        setState(260);
+        setState(269);
        match(RP);
        }
        break;
      }
      _ctx.stop = _input.LT(-1);
-      setState(269);
+      setState(278);
      _errHandler.sync(this);
      _alt = getInterpreter().adaptivePredict(_input,17,_ctx);
      while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
@@ -1742,16 +1747,16 @@ private PrimaryExpressionContext primaryExpression(int _p) throws RecognitionExc
          {
          _localctx = new InlineCastContext(new PrimaryExpressionContext(_parentctx, _parentState));
          pushNewRecursionContext(_localctx, _startState, RULE_primaryExpression);
-          setState(264);
+          setState(273);
          if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)");
-          setState(265);
+          setState(274);
          match(CAST_OP);
-          setState(266);
+          setState(275);
          dataType();
          }
-          } 
+          }
        }
-        setState(271);
+        setState(280);
        _errHandler.sync(this);
        _alt = getInterpreter().adaptivePredict(_input,17,_ctx);
      }
@@ -1813,37 +1818,37 @@ public final FunctionExpressionContext functionExpression() throws RecognitionEx
    try {
      enterOuterAlt(_localctx, 1);
      {
-      setState(272);
+      setState(281);
      functionName();
-      setState(273);
+      setState(282);
      match(LP);
-      setState(283);
+      setState(292);
      _errHandler.sync(this);
      switch ( getInterpreter().adaptivePredict(_input,19,_ctx) ) {
      case 1:
        {
-        setState(274);
+        setState(283);
        match(ASTERISK);
        }
        break;
      case 2:
        {
        {
-        setState(275);
+        setState(284);
        booleanExpression(0);
-        setState(280);
+        setState(289);
        _errHandler.sync(this);
        _la = _input.LA(1);
        while (_la==COMMA) {
          {
          {
-          setState(276);
+          setState(285);
          match(COMMA);
-          setState(277);
+          setState(286);
          booleanExpression(0);
          }
          }
-          setState(282);
+          setState(291);
          _errHandler.sync(this);
          _la = _input.LA(1);
        }
@@ -1851,7 +1856,7 @@ public final FunctionExpressionContext functionExpression() throws RecognitionEx
        }
        break;
      }
-      setState(285);
+      setState(294);
      match(RP);
      }
    }
@@ -1897,7 +1902,7 @@ public final FunctionNameContext functionName() throws RecognitionException {
    try {
      enterOuterAlt(_localctx, 1);
      {
-      setState(287);
+      setState(296);
      identifierOrParameter();
      }
    }
@@ -1919,7 +1924,7 @@ public DataTypeContext(ParserRuleContext parent, int invokingState) {
      super(parent, invokingState);
    }
    @Override public int getRuleIndex() { return RULE_dataType; }
- 
+    @SuppressWarnings("this-escape")
    public DataTypeContext() { }
    public void copyFrom(DataTypeContext ctx) {
@@ -1955,7 +1960,7 @@ public final DataTypeContext dataType() throws RecognitionException {
      _localctx = new ToDataTypeContext(_localctx);
      enterOuterAlt(_localctx, 1);
      {
-      setState(289);
+      setState(298);
      identifier();
      }
    }
@@ -2002,9 +2007,9 @@ public final RowCommandContext rowCommand() throws RecognitionException {
    try {
      enterOuterAlt(_localctx, 1);
      {
-      setState(291);
+      setState(300);
      match(ROW);
-      setState(292);
+      setState(301);
      fields();
      }
    }
@@ -2058,23 +2063,23 @@ public final FieldsContext fields() throws RecognitionException {
      int _alt;
      enterOuterAlt(_localctx, 1);
      {
-      setState(294);
+      setState(303);
      field();
-      setState(299);
+      setState(308);
      _errHandler.sync(this);
      _alt = getInterpreter().adaptivePredict(_input,20,_ctx);
      while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
        if ( _alt==1 ) {
          {
          {
-          setState(295);
+          setState(304);
          match(COMMA);
-          setState(296);
+          setState(305);
          field();
          }
-          } 
+          }
        }
-        setState(301);
+        setState(310);
        _errHandler.sync(this);
        _alt = getInterpreter().adaptivePredict(_input,20,_ctx);
      }
@@ -2126,19 +2131,19 @@ public final FieldContext field() throws RecognitionException {
    try {
      enterOuterAlt(_localctx, 1);
      {
-      setState(305);
+      setState(314);
      _errHandler.sync(this);
      switch ( getInterpreter().adaptivePredict(_input,21,_ctx) ) {
      case 1:
        {
-        setState(302);
+        setState(311);
        qualifiedName();
-        setState(303);
+        setState(312);
        match(ASSIGN);
        }
        break;
      }
-      setState(307);
+      setState(316);
      booleanExpression(0);
      }
    }
@@ -2196,34 +2201,34 @@ public final FromCommandContext fromCommand() throws RecognitionException {
      int _alt;
      enterOuterAlt(_localctx, 1);
      {
-      setState(309);
+      setState(318);
      match(FROM);
-      setState(310);
+      setState(319);
      indexPattern();
-      setState(315);
+      setState(324);
      _errHandler.sync(this);
      _alt = getInterpreter().adaptivePredict(_input,22,_ctx);
      while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
        if ( _alt==1 ) {
          {
          {
-          setState(311);
+          setState(320);
          match(COMMA);
-          setState(312);
+          setState(321);
          indexPattern();
          }
-          } 
+          }
        }
-        setState(317);
+        setState(326);
        _errHandler.sync(this);
        _alt = getInterpreter().adaptivePredict(_input,22,_ctx);
      }
-      setState(319);
+      setState(328);
      _errHandler.sync(this);
      switch ( getInterpreter().adaptivePredict(_input,23,_ctx) ) {
      case 1:
        {
-        setState(318);
+        setState(327);
        metadata();
        }
        break;
@@ -2276,19 +2281,19 @@ public final IndexPatternContext indexPattern() throws RecognitionException {
    try {
      enterOuterAlt(_localctx, 1);
      {
-      setState(324);
+      setState(333);
      _errHandler.sync(this);
      switch ( getInterpreter().adaptivePredict(_input,24,_ctx) ) {
      case 1:
        {
-        setState(321);
+        setState(330);
        clusterString();
-        setState(322);
+        setState(331);
        match(COLON);
        }
        break;
      }
-      setState(326);
+      setState(335);
      indexString();
      }
    }
@@ -2332,7 +2337,7 @@ public final ClusterStringContext clusterString() throws RecognitionException {
    try {
      enterOuterAlt(_localctx, 1);
      {
-      setState(328);
+      setState(337);
      match(UNQUOTED_SOURCE);
      }
    }
@@ -2378,7 +2383,7 @@ public final IndexStringContext indexString() throws RecognitionException {
    try {
      enterOuterAlt(_localctx, 1);
      {
-      setState(330);
+      setState(339);
      _la = _input.LA(1);
      if ( !(_la==QUOTED_STRING || _la==UNQUOTED_SOURCE) ) {
      _errHandler.recoverInline(this);
@@ -2433,20 +2438,20 @@ public final MetadataContext metadata() throws RecognitionException {
    MetadataContext _localctx = new MetadataContext(_ctx, getState());
    enterRule(_localctx, 42, RULE_metadata);
    try {
-      setState(334);
+      setState(343);
      _errHandler.sync(this);
      switch (_input.LA(1)) {
      case METADATA:
        enterOuterAlt(_localctx, 1);
        {
-        setState(332);
+        setState(341);
        metadataOption();
        }
        break;
      case OPENING_BRACKET:
        enterOuterAlt(_localctx, 2);
        {
-        setState(333);
+        setState(342);
        deprecated_metadata();
        }
        break;
@@ -2503,25 +2508,25 @@ public final MetadataOptionContext metadataOption() throws RecognitionException
      int _alt;
      enterOuterAlt(_localctx, 1);
      {
-      setState(336);
+      setState(345);
      match(METADATA);
-      setState(337);
+      setState(346);
      match(UNQUOTED_SOURCE);
-      setState(342);
+      setState(351);
      _errHandler.sync(this);
      _alt = getInterpreter().adaptivePredict(_input,26,_ctx);
      while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
        if ( _alt==1 ) {
          {
          {
-          setState(338);
+          setState(347);
          match(COMMA);
-          setState(339);
+          setState(348);
          match(UNQUOTED_SOURCE);
          }
-          } 
+          }
        }
-        setState(344);
+        setState(353);
        _errHandler.sync(this);
        _alt = getInterpreter().adaptivePredict(_input,26,_ctx);
      }
@@ -2570,11 +2575,11 @@ public final Deprecated_metadataContext deprecated_metadata() throws Recognition
    try {
      enterOuterAlt(_localctx, 1);
      {
-      setState(345);
+      setState(354);
      match(OPENING_BRACKET);
-      setState(346);
+      setState(355);
      metadataOption();
-      setState(347);
+      setState(356);
      match(CLOSING_BRACKET);
      }
    }
@@ -2638,46 +2643,46 @@ public final MetricsCommandContext metricsCommand() throws RecognitionException
      int _alt;
      enterOuterAlt(_localctx, 1);
      {
-      setState(349);
+      setState(358);
      match(DEV_METRICS);
-      setState(350);
+      setState(359);
      indexPattern();
-      setState(355);
+      setState(364);
      _errHandler.sync(this);
      _alt = getInterpreter().adaptivePredict(_input,27,_ctx);
      while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
        if ( _alt==1 ) {
          {
          {
-          setState(351);
+          setState(360);
          match(COMMA);
-          setState(352);
+          setState(361);
          indexPattern();
          }
-          } 
+          }
        }
-        setState(357);
+        setState(366);
        _errHandler.sync(this);
        _alt = getInterpreter().adaptivePredict(_input,27,_ctx);
      }
-      setState(359);
+      setState(368);
      _errHandler.sync(this);
      switch ( getInterpreter().adaptivePredict(_input,28,_ctx) ) {
      case 1:
        {
-        setState(358);
+        setState(367);
        ((MetricsCommandContext)_localctx).aggregates = aggFields();
        }
        break;
      }
-      setState(363);
+      setState(372);
      _errHandler.sync(this);
      switch ( getInterpreter().adaptivePredict(_input,29,_ctx) ) {
      case 1:
        {
-        setState(361);
+        setState(370);
        match(BY);
-        setState(362);
+        setState(371);
        ((MetricsCommandContext)_localctx).grouping = fields();
        }
        break;
@@ -2727,9 +2732,9 @@ public final EvalCommandContext evalCommand() throws RecognitionException {
    try {
      enterOuterAlt(_localctx, 1);
      {
-      setState(365);
+      setState(374);
      match(EVAL);
-      setState(366);
+      setState(375);
      fields();
      }
    }
@@ -2782,26 +2787,26 @@ public final StatsCommandContext statsCommand() throws RecognitionException {
    try {
      enterOuterAlt(_localctx, 1);
      {
-      setState(368);
+      setState(377);
      match(STATS);
-      setState(370);
+      setState(379);
      _errHandler.sync(this);
      switch ( getInterpreter().adaptivePredict(_input,30,_ctx) ) {
      case 1:
        {
-        setState(369);
+        setState(378);
        ((StatsCommandContext)_localctx).stats = aggFields();
        }
        break;
      }
-      setState(374);
+      setState(383);
      _errHandler.sync(this);
      switch ( getInterpreter().adaptivePredict(_input,31,_ctx) ) {
      case 1:
        {
-        setState(372);
+        setState(381);
        match(BY);
-        setState(373);
+        setState(382);
        ((StatsCommandContext)_localctx).grouping = fields();
        }
        break;
@@ -2858,23 +2863,23 @@ public final AggFieldsContext aggFields() throws RecognitionException {
      int _alt;
      enterOuterAlt(_localctx, 1);
      {
-      setState(376);
+      setState(385);
      aggField();
-      setState(381);
+      setState(390);
      _errHandler.sync(this);
      _alt = getInterpreter().adaptivePredict(_input,32,_ctx);
      while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
        if ( _alt==1 ) {
          {
          {
-          setState(377);
+          setState(386);
          match(COMMA);
-          setState(378);
+          setState(387);
          aggField();
          }
-          } 
+          }
        }
-        setState(383);
+        setState(392);
        _errHandler.sync(this);
        _alt = getInterpreter().adaptivePredict(_input,32,_ctx);
      }
@@ -2926,16 +2931,16 @@ public final AggFieldContext aggField() throws RecognitionException {
    try {
      enterOuterAlt(_localctx, 1);
      {
-      setState(384);
+      setState(393);
      field();
-      setState(387);
+      setState(396);
      _errHandler.sync(this);
      switch ( getInterpreter().adaptivePredict(_input,33,_ctx) ) {
      case 1:
        {
-        setState(385);
+        setState(394);
        match(WHERE);
-        setState(386);
+        setState(395);
        booleanExpression(0);
        }
        break;
@@ -2992,23 +2997,23 @@ public final QualifiedNameContext qualifiedName() throws RecognitionException {
      int _alt;
      enterOuterAlt(_localctx, 1);
      {
-      setState(389);
+      setState(398);
      identifierOrParameter();
-      setState(394);
+      setState(403);
      _errHandler.sync(this);
      _alt = getInterpreter().adaptivePredict(_input,34,_ctx);
      while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
        if ( _alt==1 ) {
          {
          {
-          setState(390);
+          setState(399);
          match(DOT);
-          setState(391);
+          setState(400);
          identifierOrParameter();
          }
-          } 
+          }
        }
-        setState(396);
+        setState(405);
        _errHandler.sync(this);
        _alt = getInterpreter().adaptivePredict(_input,34,_ctx);
      }
@@ -3064,23 +3069,23 @@ public final QualifiedNamePatternContext qualifiedNamePattern() throws Recogniti
      int _alt;
      enterOuterAlt(_localctx, 1);
      {
-      setState(397);
+      setState(406);
      identifierPattern();
-      setState(402);
+      setState(411);
      _errHandler.sync(this);
      _alt = getInterpreter().adaptivePredict(_input,35,_ctx);
      while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
        if ( _alt==1 ) {
          {
          {
-          setState(398);
+          setState(407);
          match(DOT);
-          setState(399);
+          setState(408);
          identifierPattern();
          }
-          } 
+          }
        }
-        setState(404);
+        setState(413);
        _errHandler.sync(this);
        _alt = getInterpreter().adaptivePredict(_input,35,_ctx);
      }
@@ -3136,23 +3141,23 @@ public final QualifiedNamePatternsContext qualifiedNamePatterns() throws Recogni
      int _alt;
      enterOuterAlt(_localctx, 1);
      {
-      setState(405);
+      setState(414);
      qualifiedNamePattern();
-      setState(410);
+      setState(419);
      _errHandler.sync(this);
      _alt = getInterpreter().adaptivePredict(_input,36,_ctx);
      while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
        if ( _alt==1 ) {
          {
          {
-          setState(406);
+          setState(415);
          match(COMMA);
-          setState(407);
+          setState(416);
          qualifiedNamePattern();
          }
-          } 
+          }
        }
-        setState(412);
+        setState(421);
        _errHandler.sync(this);
        _alt = getInterpreter().adaptivePredict(_input,36,_ctx);
      }
@@ -3200,7 +3205,7 @@ public final IdentifierContext identifier() throws RecognitionException {
    try {
      enterOuterAlt(_localctx, 1);
      {
-      setState(413);
+      setState(422);
      _la = _input.LA(1);
      if ( !(_la==UNQUOTED_IDENTIFIER || _la==QUOTED_IDENTIFIER) ) {
      _errHandler.recoverInline(this);
@@ -3253,22 +3258,22 @@ public final IdentifierPatternContext identifierPattern() throws RecognitionExce
    IdentifierPatternContext _localctx = new IdentifierPatternContext(_ctx, getState());
    enterRule(_localctx, 66, RULE_identifierPattern);
    try {
-      setState(418);
+      setState(427);
      _errHandler.sync(this);
      switch ( getInterpreter().adaptivePredict(_input,37,_ctx) ) {
      case 1:
        enterOuterAlt(_localctx, 1);
        {
-        setState(415);
+        setState(424);
        match(ID_PATTERN);
        }
        break;
      case 2:
        enterOuterAlt(_localctx, 2);
        {
-        setState(416);
+        setState(425);
        if (!(this.isDevVersion())) throw new FailedPredicateException(this, "this.isDevVersion()");
-        setState(417);
+        setState(426);
        parameter();
        }
        break;
@@ -3292,7 +3297,7 @@ public ConstantContext(ParserRuleContext parent, int invokingState) {
      super(parent, invokingState);
    }
    @Override public int getRuleIndex() { return RULE_constant; }
- 
+    @SuppressWarnings("this-escape")
    public ConstantContext() { }
    public void copyFrom(ConstantContext ctx) {
@@ -3541,14 +3546,14 @@ public final ConstantContext constant() throws RecognitionException {
    enterRule(_localctx, 68, RULE_constant);
    int _la;
    try {
-      setState(462);
+      setState(471);
      _errHandler.sync(this);
      switch ( getInterpreter().adaptivePredict(_input,41,_ctx) ) {
      case 1:
        _localctx = new NullLiteralContext(_localctx);
        enterOuterAlt(_localctx, 1);
        {
-        setState(420);
+        setState(429);
        match(NULL);
        }
        break;
@@ -3556,9 +3561,9 @@ public final ConstantContext constant() throws RecognitionException {
        _localctx = new QualifiedIntegerLiteralContext(_localctx);
        enterOuterAlt(_localctx, 2);
        {
-        setState(421);
+        setState(430);
        integerValue();
-        setState(422);
+        setState(431);
        match(UNQUOTED_IDENTIFIER);
        }
        break;
@@ -3566,7 +3571,7 @@ public final ConstantContext constant() throws RecognitionException {
        _localctx = new DecimalLiteralContext(_localctx);
        enterOuterAlt(_localctx, 3);
        {
-        setState(424);
+        setState(433);
        decimalValue();
        }
        break;
@@ -3574,7 +3579,7 @@ public final ConstantContext constant() throws RecognitionException {
        _localctx = new IntegerLiteralContext(_localctx);
        enterOuterAlt(_localctx, 4);
        {
-        setState(425);
+        setState(434);
        integerValue();
        }
        break;
@@ -3582,7 +3587,7 @@ public final ConstantContext constant() throws RecognitionException {
        _localctx = new BooleanLiteralContext(_localctx);
        enterOuterAlt(_localctx, 5);
        {
-        setState(426);
+        setState(435);
        booleanValue();
        }
        break;
@@ -3590,7 +3595,7 @@ public final ConstantContext constant() throws RecognitionException {
        _localctx = new InputParameterContext(_localctx);
        enterOuterAlt(_localctx, 6);
        {
-        setState(427);
+        setState(436);
        parameter();
        }
        break;
@@ -3598,7 +3603,7 @@ public final ConstantContext constant() throws RecognitionException {
        _localctx = new StringLiteralContext(_localctx);
        enterOuterAlt(_localctx, 7);
        {
-        setState(428);
+        setState(437);
        string();
        }
        break;
@@ -3606,27 +3611,27 @@ public final ConstantContext constant() throws RecognitionException {
        _localctx = new NumericArrayLiteralContext(_localctx);
        enterOuterAlt(_localctx, 8);
        {
-        setState(429);
+        setState(438);
        match(OPENING_BRACKET);
-        setState(430);
+        setState(439);
        numericValue();
-        setState(435);
+        setState(444);
        _errHandler.sync(this);
        _la = _input.LA(1);
        while (_la==COMMA) {
          {
          {
-          setState(431);
+          setState(440);
          match(COMMA);
-          setState(432);
+          setState(441);
          numericValue();
          }
          }
-          setState(437);
+          setState(446);
          _errHandler.sync(this);
          _la = _input.LA(1);
        }
-        setState(438);
+        setState(447);
        match(CLOSING_BRACKET);
        }
        break;
@@ -3634,27 +3639,27 @@ public final ConstantContext constant() throws RecognitionException {
        _localctx = new BooleanArrayLiteralContext(_localctx);
        enterOuterAlt(_localctx, 9);
        {
-        setState(440);
+        setState(449);
        match(OPENING_BRACKET);
-        setState(441);
+        setState(450);
        booleanValue();
-        setState(446);
+        setState(455);
        _errHandler.sync(this);
        _la = _input.LA(1);
        while (_la==COMMA) {
          {
          {
-          setState(442);
+          setState(451);
          match(COMMA);
-          setState(443);
+          setState(452);
          booleanValue();
          }
          }
-          setState(448);
+          setState(457);
          _errHandler.sync(this);
          _la = _input.LA(1);
        }
-        setState(449);
+        setState(458);
        match(CLOSING_BRACKET);
        }
        break;
@@ -3662,27 +3667,27 @@ public final ConstantContext constant() throws RecognitionException {
        _localctx = new StringArrayLiteralContext(_localctx);
        enterOuterAlt(_localctx, 10);
        {
-        setState(451);
+        setState(460);
        match(OPENING_BRACKET);
-        setState(452);
+        setState(461);
        string();
-        setState(457);
+        setState(466);
        _errHandler.sync(this);
        _la = _input.LA(1);
        while (_la==COMMA) {
          {
          {
-          setState(453);
+          setState(462);
          match(COMMA);
-          setState(454);
+          setState(463);
          string();
          }
          }
-          setState(459);
+          setState(468);
          _errHandler.sync(this);
          _la = _input.LA(1);
        }
-        setState(460);
+        setState(469);
        match(CLOSING_BRACKET);
        }
        break;
@@ -3706,7 +3711,7 @@ public ParameterContext(ParserRuleContext parent, int invokingState) {
      super(parent, 
invokingState); } @Override public int getRuleIndex() { return RULE_parameter; } - + @SuppressWarnings("this-escape") public ParameterContext() { } public void copyFrom(ParameterContext ctx) { @@ -3756,14 +3761,14 @@ public final ParameterContext parameter() throws RecognitionException { ParameterContext _localctx = new ParameterContext(_ctx, getState()); enterRule(_localctx, 70, RULE_parameter); try { - setState(466); + setState(475); _errHandler.sync(this); switch (_input.LA(1)) { case PARAM: _localctx = new InputParamContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(464); + setState(473); match(PARAM); } break; @@ -3771,7 +3776,7 @@ public final ParameterContext parameter() throws RecognitionException { _localctx = new InputNamedOrPositionalParamContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(465); + setState(474); match(NAMED_OR_POSITIONAL_PARAM); } break; @@ -3822,22 +3827,22 @@ public final IdentifierOrParameterContext identifierOrParameter() throws Recogni IdentifierOrParameterContext _localctx = new IdentifierOrParameterContext(_ctx, getState()); enterRule(_localctx, 72, RULE_identifierOrParameter); try { - setState(471); + setState(480); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,43,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(468); + setState(477); identifier(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(469); + setState(478); if (!(this.isDevVersion())) throw new FailedPredicateException(this, "this.isDevVersion()"); - setState(470); + setState(479); parameter(); } break; @@ -3884,9 +3889,9 @@ public final LimitCommandContext limitCommand() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(473); + setState(482); match(LIMIT); - setState(474); + setState(483); match(INTEGER_LITERAL); } } @@ -3941,25 +3946,25 @@ public final SortCommandContext sortCommand() throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(476); + setState(485); match(SORT); - setState(477); + setState(486); orderExpression(); - setState(482); + setState(491); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,44,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(478); + setState(487); match(COMMA); - setState(479); + setState(488); orderExpression(); } - } + } } - setState(484); + setState(493); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,44,_ctx); } @@ -4015,14 +4020,14 @@ public final OrderExpressionContext orderExpression() throws RecognitionExceptio try { enterOuterAlt(_localctx, 1); { - setState(485); + setState(494); booleanExpression(0); - setState(487); + setState(496); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,45,_ctx) ) { case 1: { - setState(486); + setState(495); ((OrderExpressionContext)_localctx).ordering = _input.LT(1); _la = _input.LA(1); if ( !(_la==ASC || _la==DESC) ) { @@ -4036,14 +4041,14 @@ public final OrderExpressionContext orderExpression() throws RecognitionExceptio } break; } - setState(491); + setState(500); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,46,_ctx) ) { case 1: { - setState(489); + setState(498); match(NULLS); - setState(490); + setState(499); ((OrderExpressionContext)_localctx).nullOrdering = _input.LT(1); _la = _input.LA(1); if ( !(_la==FIRST || _la==LAST) ) { @@ -4102,9 +4107,9 @@ public final KeepCommandContext keepCommand() throws RecognitionException { try 
{ enterOuterAlt(_localctx, 1); { - setState(493); + setState(502); match(KEEP); - setState(494); + setState(503); qualifiedNamePatterns(); } } @@ -4151,9 +4156,9 @@ public final DropCommandContext dropCommand() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(496); + setState(505); match(DROP); - setState(497); + setState(506); qualifiedNamePatterns(); } } @@ -4208,25 +4213,25 @@ public final RenameCommandContext renameCommand() throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(499); + setState(508); match(RENAME); - setState(500); + setState(509); renameClause(); - setState(505); + setState(514); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,47,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(501); + setState(510); match(COMMA); - setState(502); + setState(511); renameClause(); } - } + } } - setState(507); + setState(516); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,47,_ctx); } @@ -4280,11 +4285,11 @@ public final RenameClauseContext renameClause() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(508); + setState(517); ((RenameClauseContext)_localctx).oldName = qualifiedNamePattern(); - setState(509); + setState(518); match(AS); - setState(510); + setState(519); ((RenameClauseContext)_localctx).newName = qualifiedNamePattern(); } } @@ -4337,18 +4342,18 @@ public final DissectCommandContext dissectCommand() throws RecognitionException try { enterOuterAlt(_localctx, 1); { - setState(512); + setState(521); match(DISSECT); - setState(513); + setState(522); primaryExpression(0); - setState(514); + setState(523); string(); - setState(516); + setState(525); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,48,_ctx) ) { case 1: { - setState(515); + setState(524); commandOptions(); } break; @@ -4401,11 +4406,11 @@ public final GrokCommandContext grokCommand() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(518); + setState(527); match(GROK); - setState(519); + setState(528); primaryExpression(0); - setState(520); + setState(529); string(); } } @@ -4452,9 +4457,9 @@ public final MvExpandCommandContext mvExpandCommand() throws RecognitionExceptio try { enterOuterAlt(_localctx, 1); { - setState(522); + setState(531); match(MV_EXPAND); - setState(523); + setState(532); qualifiedName(); } } @@ -4508,23 +4513,23 @@ public final CommandOptionsContext commandOptions() throws RecognitionException int _alt; enterOuterAlt(_localctx, 1); { - setState(525); + setState(534); commandOption(); - setState(530); + setState(539); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,49,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(526); + setState(535); match(COMMA); - setState(527); + setState(536); commandOption(); } - } + } } - setState(532); + setState(541); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,49,_ctx); } @@ -4576,11 +4581,11 @@ public final CommandOptionContext commandOption() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(533); + setState(542); identifier(); - setState(534); + setState(543); match(ASSIGN); - setState(535); + setState(544); constant(); } } @@ -4626,7 +4631,7 @@ public final BooleanValueContext booleanValue() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - 
setState(537); + setState(546); _la = _input.LA(1); if ( !(_la==FALSE || _la==TRUE) ) { _errHandler.recoverInline(this); @@ -4681,20 +4686,20 @@ public final NumericValueContext numericValue() throws RecognitionException { NumericValueContext _localctx = new NumericValueContext(_ctx, getState()); enterRule(_localctx, 100, RULE_numericValue); try { - setState(541); + setState(550); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,50,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(539); + setState(548); decimalValue(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(540); + setState(549); integerValue(); } break; @@ -4743,12 +4748,12 @@ public final DecimalValueContext decimalValue() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(544); + setState(553); _errHandler.sync(this); _la = _input.LA(1); if (_la==PLUS || _la==MINUS) { { - setState(543); + setState(552); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { _errHandler.recoverInline(this); @@ -4761,7 +4766,7 @@ public final DecimalValueContext decimalValue() throws RecognitionException { } } - setState(546); + setState(555); match(DECIMAL_LITERAL); } } @@ -4808,12 +4813,12 @@ public final IntegerValueContext integerValue() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(549); + setState(558); _errHandler.sync(this); _la = _input.LA(1); if (_la==PLUS || _la==MINUS) { { - setState(548); + setState(557); _la = _input.LA(1); if ( !(_la==PLUS || _la==MINUS) ) { _errHandler.recoverInline(this); @@ -4826,7 +4831,7 @@ public final IntegerValueContext integerValue() throws RecognitionException { } } - setState(551); + setState(560); match(INTEGER_LITERAL); } } @@ -4870,7 +4875,7 @@ public final StringContext string() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(553); + setState(562); match(QUOTED_STRING); } } @@ -4920,9 +4925,9 @@ public final ComparisonOperatorContext comparisonOperator() throws RecognitionEx try { enterOuterAlt(_localctx, 1); { - setState(555); + setState(564); _la = _input.LA(1); - if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 562949953421312000L) != 0)) ) { + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & -432345564227567616L) != 0)) ) { _errHandler.recoverInline(this); } else { @@ -4975,9 +4980,9 @@ public final ExplainCommandContext explainCommand() throws RecognitionException try { enterOuterAlt(_localctx, 1); { - setState(557); + setState(566); match(EXPLAIN); - setState(558); + setState(567); subqueryExpression(); } } @@ -5025,11 +5030,11 @@ public final SubqueryExpressionContext subqueryExpression() throws RecognitionEx try { enterOuterAlt(_localctx, 1); { - setState(560); + setState(569); match(OPENING_BRACKET); - setState(561); + setState(570); query(0); - setState(562); + setState(571); match(CLOSING_BRACKET); } } @@ -5051,7 +5056,7 @@ public ShowCommandContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_showCommand; } - + @SuppressWarnings("this-escape") public ShowCommandContext() { } public void copyFrom(ShowCommandContext ctx) { @@ -5086,9 +5091,9 @@ public final ShowCommandContext showCommand() throws RecognitionException { _localctx = new ShowInfoContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(564); + setState(573); match(SHOW); - setState(565); + setState(574); match(INFO); } } @@ -5151,46 +5156,46 @@ public final EnrichCommandContext enrichCommand() throws 
RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(567); + setState(576); match(ENRICH); - setState(568); + setState(577); ((EnrichCommandContext)_localctx).policyName = match(ENRICH_POLICY_NAME); - setState(571); + setState(580); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,53,_ctx) ) { case 1: { - setState(569); + setState(578); match(ON); - setState(570); + setState(579); ((EnrichCommandContext)_localctx).matchField = qualifiedNamePattern(); } break; } - setState(582); + setState(591); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,55,_ctx) ) { case 1: { - setState(573); + setState(582); match(WITH); - setState(574); + setState(583); enrichWithClause(); - setState(579); + setState(588); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,54,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(575); + setState(584); match(COMMA); - setState(576); + setState(585); enrichWithClause(); } - } + } } - setState(581); + setState(590); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,54,_ctx); } @@ -5247,19 +5252,19 @@ public final EnrichWithClauseContext enrichWithClause() throws RecognitionExcept try { enterOuterAlt(_localctx, 1); { - setState(587); + setState(596); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,56,_ctx) ) { case 1: { - setState(584); + setState(593); ((EnrichWithClauseContext)_localctx).newName = qualifiedNamePattern(); - setState(585); + setState(594); match(ASSIGN); } break; } - setState(589); + setState(598); ((EnrichWithClauseContext)_localctx).enrichField = qualifiedNamePattern(); } } @@ -5312,13 +5317,13 @@ public final LookupCommandContext lookupCommand() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(591); + setState(600); match(DEV_LOOKUP); - setState(592); + setState(601); ((LookupCommandContext)_localctx).tableName = indexPattern(); - setState(593); + setState(602); match(ON); - setState(594); + setState(603); ((LookupCommandContext)_localctx).matchFields = qualifiedNamePatterns(); } } @@ -5371,18 +5376,18 @@ public final InlinestatsCommandContext inlinestatsCommand() throws RecognitionEx try { enterOuterAlt(_localctx, 1); { - setState(596); + setState(605); match(DEV_INLINESTATS); - setState(597); + setState(606); ((InlinestatsCommandContext)_localctx).stats = aggFields(); - setState(600); + setState(609); _errHandler.sync(this); switch ( getInterpreter().adaptivePredict(_input,57,_ctx) ) { case 1: { - setState(598); + setState(607); match(BY); - setState(599); + setState(608); ((InlinestatsCommandContext)_localctx).grouping = fields(); } break; @@ -5400,6 +5405,270 @@ public final InlinestatsCommandContext inlinestatsCommand() throws RecognitionEx return _localctx; } + @SuppressWarnings("CheckReturnValue") + public static class JoinCommandContext extends ParserRuleContext { + public Token type; + public TerminalNode DEV_JOIN() { return getToken(EsqlBaseParser.DEV_JOIN, 0); } + public JoinTargetContext joinTarget() { + return getRuleContext(JoinTargetContext.class,0); + } + public JoinConditionContext joinCondition() { + return getRuleContext(JoinConditionContext.class,0); + } + public TerminalNode DEV_JOIN_LOOKUP() { return getToken(EsqlBaseParser.DEV_JOIN_LOOKUP, 0); } + public TerminalNode DEV_JOIN_LEFT() { return getToken(EsqlBaseParser.DEV_JOIN_LEFT, 0); } + public TerminalNode DEV_JOIN_RIGHT() { return 
getToken(EsqlBaseParser.DEV_JOIN_RIGHT, 0); } + @SuppressWarnings("this-escape") + public JoinCommandContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_joinCommand; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).enterJoinCommand(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).exitJoinCommand(this); + } + @Override + public <T> T accept(ParseTreeVisitor<? extends T> visitor) { + if ( visitor instanceof EsqlBaseParserVisitor ) return ((EsqlBaseParserVisitor<? extends T>)visitor).visitJoinCommand(this); + else return visitor.visitChildren(this); + } + } + + public final JoinCommandContext joinCommand() throws RecognitionException { + JoinCommandContext _localctx = new JoinCommandContext(_ctx, getState()); + enterRule(_localctx, 124, RULE_joinCommand); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(612); + _errHandler.sync(this); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 29360128L) != 0)) { + { + setState(611); + ((JoinCommandContext)_localctx).type = _input.LT(1); + _la = _input.LA(1); + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 29360128L) != 0)) ) { + ((JoinCommandContext)_localctx).type = (Token)_errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + } + } + + setState(614); + match(DEV_JOIN); + setState(615); + joinTarget(); + setState(616); + joinCondition(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + @SuppressWarnings("CheckReturnValue") + public static class JoinTargetContext extends ParserRuleContext { + public IdentifierContext index; + public IdentifierContext alias; + public List<IdentifierContext> identifier() { + return getRuleContexts(IdentifierContext.class); + } + public IdentifierContext identifier(int i) { + return getRuleContext(IdentifierContext.class,i); + } + public TerminalNode AS() { return getToken(EsqlBaseParser.AS, 0); } + @SuppressWarnings("this-escape") + public JoinTargetContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_joinTarget; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).enterJoinTarget(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).exitJoinTarget(this); + } + @Override + public <T> T accept(ParseTreeVisitor<? extends T> visitor) { + if ( visitor instanceof EsqlBaseParserVisitor ) return ((EsqlBaseParserVisitor<? extends T>)visitor).visitJoinTarget(this); + else return visitor.visitChildren(this); + } + } + + public final JoinTargetContext joinTarget() throws RecognitionException { + JoinTargetContext _localctx = new JoinTargetContext(_ctx, getState()); + enterRule(_localctx, 126, RULE_joinTarget); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(618); + ((JoinTargetContext)_localctx).index = identifier(); + setState(621); + _errHandler.sync(this); + _la = _input.LA(1); + if (_la==AS) { + { + 
setState(619); + match(AS); + setState(620); + ((JoinTargetContext)_localctx).alias = identifier(); + } + } + + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + @SuppressWarnings("CheckReturnValue") + public static class JoinConditionContext extends ParserRuleContext { + public TerminalNode ON() { return getToken(EsqlBaseParser.ON, 0); } + public List<JoinPredicateContext> joinPredicate() { + return getRuleContexts(JoinPredicateContext.class); + } + public JoinPredicateContext joinPredicate(int i) { + return getRuleContext(JoinPredicateContext.class,i); + } + public List<TerminalNode> COMMA() { return getTokens(EsqlBaseParser.COMMA); } + public TerminalNode COMMA(int i) { + return getToken(EsqlBaseParser.COMMA, i); + } + @SuppressWarnings("this-escape") + public JoinConditionContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_joinCondition; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).enterJoinCondition(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).exitJoinCondition(this); + } + @Override + public <T> T accept(ParseTreeVisitor<? extends T> visitor) { + if ( visitor instanceof EsqlBaseParserVisitor ) return ((EsqlBaseParserVisitor<? extends T>)visitor).visitJoinCondition(this); + else return visitor.visitChildren(this); + } + } + + public final JoinConditionContext joinCondition() throws RecognitionException { + JoinConditionContext _localctx = new JoinConditionContext(_ctx, getState()); + enterRule(_localctx, 128, RULE_joinCondition); + try { + int _alt; + enterOuterAlt(_localctx, 1); + { + setState(623); + match(ON); + setState(624); + joinPredicate(); + setState(629); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,60,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + { + { + setState(625); + match(COMMA); + setState(626); + joinPredicate(); + } + } + } + setState(631); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,60,_ctx); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + @SuppressWarnings("CheckReturnValue") + public static class JoinPredicateContext extends ParserRuleContext { + public ValueExpressionContext valueExpression() { + return getRuleContext(ValueExpressionContext.class,0); + } + @SuppressWarnings("this-escape") + public JoinPredicateContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_joinPredicate; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).enterJoinPredicate(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).exitJoinPredicate(this); + } + @Override + public <T> T accept(ParseTreeVisitor<? extends T> visitor) { + if ( visitor instanceof EsqlBaseParserVisitor ) return
((EsqlBaseParserVisitor<? extends T>)visitor).visitJoinPredicate(this); + else return visitor.visitChildren(this); + } + } + + public final JoinPredicateContext joinPredicate() throws RecognitionException { + JoinPredicateContext _localctx = new JoinPredicateContext(_ctx, getState()); + enterRule(_localctx, 130, RULE_joinPredicate); + try { + enterOuterAlt(_localctx, 1); + { + setState(632); + valueExpression(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) { switch (ruleIndex) { case 1: @@ -5441,13 +5710,13 @@ private boolean processingCommand_sempred(ProcessingCommandContext _localctx, in return this.isDevVersion(); case 3: return this.isDevVersion(); + case 4: + return this.isDevVersion(); } return true; } private boolean booleanExpression_sempred(BooleanExpressionContext _localctx, int predIndex) { switch (predIndex) { - case 4: - return this.isDevVersion(); case 5: return precpred(_ctx, 5); case 6: @@ -5487,387 +5756,406 @@ private boolean identifierOrParameter_sempred(IdentifierOrParameterContext _loca } public static final String _serializedATN = - "\u0004\u0001w\u025b\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001\u0002"+ - "\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004\u0007\u0004\u0002"+ - "\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007\u0007\u0007\u0002"+ - "\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b\u0007\u000b\u0002"+ - "\f\u0007\f\u0002\r\u0007\r\u0002\u000e\u0007\u000e\u0002\u000f\u0007\u000f"+ - "\u0002\u0010\u0007\u0010\u0002\u0011\u0007\u0011\u0002\u0012\u0007\u0012"+ - "\u0002\u0013\u0007\u0013\u0002\u0014\u0007\u0014\u0002\u0015\u0007\u0015"+ - "\u0002\u0016\u0007\u0016\u0002\u0017\u0007\u0017\u0002\u0018\u0007\u0018"+ - "\u0002\u0019\u0007\u0019\u0002\u001a\u0007\u001a\u0002\u001b\u0007\u001b"+ - "\u0002\u001c\u0007\u001c\u0002\u001d\u0007\u001d\u0002\u001e\u0007\u001e"+ - "\u0002\u001f\u0007\u001f\u0002 \u0007 \u0002!\u0007!\u0002\"\u0007\"\u0002"+ - "#\u0007#\u0002$\u0007$\u0002%\u0007%\u0002&\u0007&\u0002\'\u0007\'\u0002"+ - "(\u0007(\u0002)\u0007)\u0002*\u0007*\u0002+\u0007+\u0002,\u0007,\u0002"+ - "-\u0007-\u0002.\u0007.\u0002/\u0007/\u00020\u00070\u00021\u00071\u0002"+ - "2\u00072\u00023\u00073\u00024\u00074\u00025\u00075\u00026\u00076\u0002"+ - "7\u00077\u00028\u00078\u00029\u00079\u0002:\u0007:\u0002;\u0007;\u0002"+ - "<\u0007<\u0002=\u0007=\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0001"+ - "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0005\u0001"+ - "\u0086\b\u0001\n\u0001\f\u0001\u0089\t\u0001\u0001\u0002\u0001\u0002\u0001"+ - "\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0003\u0002\u0091\b\u0002\u0001"+ - "\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001"+ - "\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001"+ - "\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0003\u0003\u00a3\b\u0003\u0001"+ - "\u0004\u0001\u0004\u0001\u0004\u0001\u0005\u0001\u0005\u0001\u0005\u0001"+ - "\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0003\u0005\u00af\b\u0005\u0001"+ - "\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0005\u0005\u00b6"+ - "\b\u0005\n\u0005\f\u0005\u00b9\t\u0005\u0001\u0005\u0001\u0005\u0001\u0005"+ - "\u0001\u0005\u0001\u0005\u0003\u0005\u00c0\b\u0005\u0001\u0005\u0001\u0005"+ - 
"\u0001\u0005\u0001\u0005\u0003\u0005\u00c6\b\u0005\u0001\u0005\u0001\u0005"+ - "\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0005\u0005\u00ce\b\u0005"+ - "\n\u0005\f\u0005\u00d1\t\u0005\u0001\u0006\u0001\u0006\u0003\u0006\u00d5"+ - "\b\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0003"+ - "\u0006\u00dc\b\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0003\u0006\u00e1"+ - "\b\u0006\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007\u0001\b\u0001"+ - "\b\u0001\b\u0001\b\u0001\b\u0003\b\u00ec\b\b\u0001\t\u0001\t\u0001\t\u0001"+ - "\t\u0003\t\u00f2\b\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0005"+ - "\t\u00fa\b\t\n\t\f\t\u00fd\t\t\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n"+ - "\u0001\n\u0001\n\u0001\n\u0003\n\u0107\b\n\u0001\n\u0001\n\u0001\n\u0005"+ - "\n\u010c\b\n\n\n\f\n\u010f\t\n\u0001\u000b\u0001\u000b\u0001\u000b\u0001"+ - "\u000b\u0001\u000b\u0001\u000b\u0005\u000b\u0117\b\u000b\n\u000b\f\u000b"+ - "\u011a\t\u000b\u0003\u000b\u011c\b\u000b\u0001\u000b\u0001\u000b\u0001"+ - "\f\u0001\f\u0001\r\u0001\r\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000f"+ - "\u0001\u000f\u0001\u000f\u0005\u000f\u012a\b\u000f\n\u000f\f\u000f\u012d"+ - "\t\u000f\u0001\u0010\u0001\u0010\u0001\u0010\u0003\u0010\u0132\b\u0010"+ - "\u0001\u0010\u0001\u0010\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0011"+ - "\u0005\u0011\u013a\b\u0011\n\u0011\f\u0011\u013d\t\u0011\u0001\u0011\u0003"+ - "\u0011\u0140\b\u0011\u0001\u0012\u0001\u0012\u0001\u0012\u0003\u0012\u0145"+ - "\b\u0012\u0001\u0012\u0001\u0012\u0001\u0013\u0001\u0013\u0001\u0014\u0001"+ - "\u0014\u0001\u0015\u0001\u0015\u0003\u0015\u014f\b\u0015\u0001\u0016\u0001"+ - "\u0016\u0001\u0016\u0001\u0016\u0005\u0016\u0155\b\u0016\n\u0016\f\u0016"+ - "\u0158\t\u0016\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0018"+ - "\u0001\u0018\u0001\u0018\u0001\u0018\u0005\u0018\u0162\b\u0018\n\u0018"+ - "\f\u0018\u0165\t\u0018\u0001\u0018\u0003\u0018\u0168\b\u0018\u0001\u0018"+ - "\u0001\u0018\u0003\u0018\u016c\b\u0018\u0001\u0019\u0001\u0019\u0001\u0019"+ - "\u0001\u001a\u0001\u001a\u0003\u001a\u0173\b\u001a\u0001\u001a\u0001\u001a"+ - "\u0003\u001a\u0177\b\u001a\u0001\u001b\u0001\u001b\u0001\u001b\u0005\u001b"+ - "\u017c\b\u001b\n\u001b\f\u001b\u017f\t\u001b\u0001\u001c\u0001\u001c\u0001"+ - "\u001c\u0003\u001c\u0184\b\u001c\u0001\u001d\u0001\u001d\u0001\u001d\u0005"+ - "\u001d\u0189\b\u001d\n\u001d\f\u001d\u018c\t\u001d\u0001\u001e\u0001\u001e"+ - "\u0001\u001e\u0005\u001e\u0191\b\u001e\n\u001e\f\u001e\u0194\t\u001e\u0001"+ - "\u001f\u0001\u001f\u0001\u001f\u0005\u001f\u0199\b\u001f\n\u001f\f\u001f"+ - "\u019c\t\u001f\u0001 \u0001 \u0001!\u0001!\u0001!\u0003!\u01a3\b!\u0001"+ - "\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001"+ - "\"\u0001\"\u0001\"\u0001\"\u0005\"\u01b2\b\"\n\"\f\"\u01b5\t\"\u0001\""+ - "\u0001\"\u0001\"\u0001\"\u0001\"\u0001\"\u0005\"\u01bd\b\"\n\"\f\"\u01c0"+ - "\t\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001\"\u0005\"\u01c8\b\""+ - "\n\"\f\"\u01cb\t\"\u0001\"\u0001\"\u0003\"\u01cf\b\"\u0001#\u0001#\u0003"+ - "#\u01d3\b#\u0001$\u0001$\u0001$\u0003$\u01d8\b$\u0001%\u0001%\u0001%\u0001"+ - "&\u0001&\u0001&\u0001&\u0005&\u01e1\b&\n&\f&\u01e4\t&\u0001\'\u0001\'"+ - "\u0003\'\u01e8\b\'\u0001\'\u0001\'\u0003\'\u01ec\b\'\u0001(\u0001(\u0001"+ - "(\u0001)\u0001)\u0001)\u0001*\u0001*\u0001*\u0001*\u0005*\u01f8\b*\n*"+ - "\f*\u01fb\t*\u0001+\u0001+\u0001+\u0001+\u0001,\u0001,\u0001,\u0001,\u0003"+ - ",\u0205\b,\u0001-\u0001-\u0001-\u0001-\u0001.\u0001.\u0001.\u0001/\u0001"+ - 
"/\u0001/\u0005/\u0211\b/\n/\f/\u0214\t/\u00010\u00010\u00010\u00010\u0001"+ - "1\u00011\u00012\u00012\u00032\u021e\b2\u00013\u00033\u0221\b3\u00013\u0001"+ - "3\u00014\u00034\u0226\b4\u00014\u00014\u00015\u00015\u00016\u00016\u0001"+ - "7\u00017\u00017\u00018\u00018\u00018\u00018\u00019\u00019\u00019\u0001"+ - ":\u0001:\u0001:\u0001:\u0003:\u023c\b:\u0001:\u0001:\u0001:\u0001:\u0005"+ - ":\u0242\b:\n:\f:\u0245\t:\u0003:\u0247\b:\u0001;\u0001;\u0001;\u0003;"+ - "\u024c\b;\u0001;\u0001;\u0001<\u0001<\u0001<\u0001<\u0001<\u0001=\u0001"+ - "=\u0001=\u0001=\u0003=\u0259\b=\u0001=\u0000\u0004\u0002\n\u0012\u0014"+ - ">\u0000\u0002\u0004\u0006\b\n\f\u000e\u0010\u0012\u0014\u0016\u0018\u001a"+ - "\u001c\u001e \"$&(*,.02468:<>@BDFHJLNPRTVXZ\\^`bdfhjlnprtvxz\u0000\b\u0001"+ - "\u0000;<\u0001\u0000=?\u0002\u0000\u001a\u001aLL\u0001\u0000CD\u0002\u0000"+ - "\u001f\u001f##\u0002\u0000&&))\u0002\u0000%%33\u0002\u0000446:\u0274\u0000"+ - "|\u0001\u0000\u0000\u0000\u0002\u007f\u0001\u0000\u0000\u0000\u0004\u0090"+ - "\u0001\u0000\u0000\u0000\u0006\u00a2\u0001\u0000\u0000\u0000\b\u00a4\u0001"+ - "\u0000\u0000\u0000\n\u00c5\u0001\u0000\u0000\u0000\f\u00e0\u0001\u0000"+ - "\u0000\u0000\u000e\u00e2\u0001\u0000\u0000\u0000\u0010\u00eb\u0001\u0000"+ - "\u0000\u0000\u0012\u00f1\u0001\u0000\u0000\u0000\u0014\u0106\u0001\u0000"+ - "\u0000\u0000\u0016\u0110\u0001\u0000\u0000\u0000\u0018\u011f\u0001\u0000"+ - "\u0000\u0000\u001a\u0121\u0001\u0000\u0000\u0000\u001c\u0123\u0001\u0000"+ - "\u0000\u0000\u001e\u0126\u0001\u0000\u0000\u0000 \u0131\u0001\u0000\u0000"+ - "\u0000\"\u0135\u0001\u0000\u0000\u0000$\u0144\u0001\u0000\u0000\u0000"+ - "&\u0148\u0001\u0000\u0000\u0000(\u014a\u0001\u0000\u0000\u0000*\u014e"+ - "\u0001\u0000\u0000\u0000,\u0150\u0001\u0000\u0000\u0000.\u0159\u0001\u0000"+ - "\u0000\u00000\u015d\u0001\u0000\u0000\u00002\u016d\u0001\u0000\u0000\u0000"+ - "4\u0170\u0001\u0000\u0000\u00006\u0178\u0001\u0000\u0000\u00008\u0180"+ - "\u0001\u0000\u0000\u0000:\u0185\u0001\u0000\u0000\u0000<\u018d\u0001\u0000"+ - "\u0000\u0000>\u0195\u0001\u0000\u0000\u0000@\u019d\u0001\u0000\u0000\u0000"+ - "B\u01a2\u0001\u0000\u0000\u0000D\u01ce\u0001\u0000\u0000\u0000F\u01d2"+ - "\u0001\u0000\u0000\u0000H\u01d7\u0001\u0000\u0000\u0000J\u01d9\u0001\u0000"+ - "\u0000\u0000L\u01dc\u0001\u0000\u0000\u0000N\u01e5\u0001\u0000\u0000\u0000"+ - "P\u01ed\u0001\u0000\u0000\u0000R\u01f0\u0001\u0000\u0000\u0000T\u01f3"+ - "\u0001\u0000\u0000\u0000V\u01fc\u0001\u0000\u0000\u0000X\u0200\u0001\u0000"+ - "\u0000\u0000Z\u0206\u0001\u0000\u0000\u0000\\\u020a\u0001\u0000\u0000"+ - "\u0000^\u020d\u0001\u0000\u0000\u0000`\u0215\u0001\u0000\u0000\u0000b"+ - "\u0219\u0001\u0000\u0000\u0000d\u021d\u0001\u0000\u0000\u0000f\u0220\u0001"+ - "\u0000\u0000\u0000h\u0225\u0001\u0000\u0000\u0000j\u0229\u0001\u0000\u0000"+ - "\u0000l\u022b\u0001\u0000\u0000\u0000n\u022d\u0001\u0000\u0000\u0000p"+ - "\u0230\u0001\u0000\u0000\u0000r\u0234\u0001\u0000\u0000\u0000t\u0237\u0001"+ - "\u0000\u0000\u0000v\u024b\u0001\u0000\u0000\u0000x\u024f\u0001\u0000\u0000"+ - "\u0000z\u0254\u0001\u0000\u0000\u0000|}\u0003\u0002\u0001\u0000}~\u0005"+ - "\u0000\u0000\u0001~\u0001\u0001\u0000\u0000\u0000\u007f\u0080\u0006\u0001"+ - "\uffff\uffff\u0000\u0080\u0081\u0003\u0004\u0002\u0000\u0081\u0087\u0001"+ - "\u0000\u0000\u0000\u0082\u0083\n\u0001\u0000\u0000\u0083\u0084\u0005\u0019"+ - "\u0000\u0000\u0084\u0086\u0003\u0006\u0003\u0000\u0085\u0082\u0001\u0000"+ - "\u0000\u0000\u0086\u0089\u0001\u0000\u0000\u0000\u0087\u0085\u0001\u0000"+ - 
"\u0000\u0000\u0087\u0088\u0001\u0000\u0000\u0000\u0088\u0003\u0001\u0000"+ - "\u0000\u0000\u0089\u0087\u0001\u0000\u0000\u0000\u008a\u0091\u0003n7\u0000"+ - "\u008b\u0091\u0003\"\u0011\u0000\u008c\u0091\u0003\u001c\u000e\u0000\u008d"+ - "\u0091\u0003r9\u0000\u008e\u008f\u0004\u0002\u0001\u0000\u008f\u0091\u0003"+ - "0\u0018\u0000\u0090\u008a\u0001\u0000\u0000\u0000\u0090\u008b\u0001\u0000"+ - "\u0000\u0000\u0090\u008c\u0001\u0000\u0000\u0000\u0090\u008d\u0001\u0000"+ - "\u0000\u0000\u0090\u008e\u0001\u0000\u0000\u0000\u0091\u0005\u0001\u0000"+ - "\u0000\u0000\u0092\u00a3\u00032\u0019\u0000\u0093\u00a3\u0003\b\u0004"+ - "\u0000\u0094\u00a3\u0003P(\u0000\u0095\u00a3\u0003J%\u0000\u0096\u00a3"+ - "\u00034\u001a\u0000\u0097\u00a3\u0003L&\u0000\u0098\u00a3\u0003R)\u0000"+ - "\u0099\u00a3\u0003T*\u0000\u009a\u00a3\u0003X,\u0000\u009b\u00a3\u0003"+ - "Z-\u0000\u009c\u00a3\u0003t:\u0000\u009d\u00a3\u0003\\.\u0000\u009e\u009f"+ - "\u0004\u0003\u0002\u0000\u009f\u00a3\u0003z=\u0000\u00a0\u00a1\u0004\u0003"+ - "\u0003\u0000\u00a1\u00a3\u0003x<\u0000\u00a2\u0092\u0001\u0000\u0000\u0000"+ - "\u00a2\u0093\u0001\u0000\u0000\u0000\u00a2\u0094\u0001\u0000\u0000\u0000"+ - "\u00a2\u0095\u0001\u0000\u0000\u0000\u00a2\u0096\u0001\u0000\u0000\u0000"+ - "\u00a2\u0097\u0001\u0000\u0000\u0000\u00a2\u0098\u0001\u0000\u0000\u0000"+ - "\u00a2\u0099\u0001\u0000\u0000\u0000\u00a2\u009a\u0001\u0000\u0000\u0000"+ - "\u00a2\u009b\u0001\u0000\u0000\u0000\u00a2\u009c\u0001\u0000\u0000\u0000"+ - "\u00a2\u009d\u0001\u0000\u0000\u0000\u00a2\u009e\u0001\u0000\u0000\u0000"+ - "\u00a2\u00a0\u0001\u0000\u0000\u0000\u00a3\u0007\u0001\u0000\u0000\u0000"+ - "\u00a4\u00a5\u0005\u0010\u0000\u0000\u00a5\u00a6\u0003\n\u0005\u0000\u00a6"+ - "\t\u0001\u0000\u0000\u0000\u00a7\u00a8\u0006\u0005\uffff\uffff\u0000\u00a8"+ - "\u00a9\u0005,\u0000\u0000\u00a9\u00c6\u0003\n\u0005\b\u00aa\u00c6\u0003"+ - "\u0010\b\u0000\u00ab\u00c6\u0003\f\u0006\u0000\u00ac\u00ae\u0003\u0010"+ - "\b\u0000\u00ad\u00af\u0005,\u0000\u0000\u00ae\u00ad\u0001\u0000\u0000"+ - "\u0000\u00ae\u00af\u0001\u0000\u0000\u0000\u00af\u00b0\u0001\u0000\u0000"+ - "\u0000\u00b0\u00b1\u0005\'\u0000\u0000\u00b1\u00b2\u0005+\u0000\u0000"+ - "\u00b2\u00b7\u0003\u0010\b\u0000\u00b3\u00b4\u0005\"\u0000\u0000\u00b4"+ - "\u00b6\u0003\u0010\b\u0000\u00b5\u00b3\u0001\u0000\u0000\u0000\u00b6\u00b9"+ - "\u0001\u0000\u0000\u0000\u00b7\u00b5\u0001\u0000\u0000\u0000\u00b7\u00b8"+ - "\u0001\u0000\u0000\u0000\u00b8\u00ba\u0001\u0000\u0000\u0000\u00b9\u00b7"+ - "\u0001\u0000\u0000\u0000\u00ba\u00bb\u00052\u0000\u0000\u00bb\u00c6\u0001"+ - "\u0000\u0000\u0000\u00bc\u00bd\u0003\u0010\b\u0000\u00bd\u00bf\u0005("+ - "\u0000\u0000\u00be\u00c0\u0005,\u0000\u0000\u00bf\u00be\u0001\u0000\u0000"+ - "\u0000\u00bf\u00c0\u0001\u0000\u0000\u0000\u00c0\u00c1\u0001\u0000\u0000"+ - "\u0000\u00c1\u00c2\u0005-\u0000\u0000\u00c2\u00c6\u0001\u0000\u0000\u0000"+ - "\u00c3\u00c4\u0004\u0005\u0004\u0000\u00c4\u00c6\u0003\u000e\u0007\u0000"+ - "\u00c5\u00a7\u0001\u0000\u0000\u0000\u00c5\u00aa\u0001\u0000\u0000\u0000"+ - "\u00c5\u00ab\u0001\u0000\u0000\u0000\u00c5\u00ac\u0001\u0000\u0000\u0000"+ - "\u00c5\u00bc\u0001\u0000\u0000\u0000\u00c5\u00c3\u0001\u0000\u0000\u0000"+ - "\u00c6\u00cf\u0001\u0000\u0000\u0000\u00c7\u00c8\n\u0005\u0000\u0000\u00c8"+ - "\u00c9\u0005\u001e\u0000\u0000\u00c9\u00ce\u0003\n\u0005\u0006\u00ca\u00cb"+ - "\n\u0004\u0000\u0000\u00cb\u00cc\u0005/\u0000\u0000\u00cc\u00ce\u0003"+ - "\n\u0005\u0005\u00cd\u00c7\u0001\u0000\u0000\u0000\u00cd\u00ca\u0001\u0000"+ - 
"\u0000\u0000\u00ce\u00d1\u0001\u0000\u0000\u0000\u00cf\u00cd\u0001\u0000"+ - "\u0000\u0000\u00cf\u00d0\u0001\u0000\u0000\u0000\u00d0\u000b\u0001\u0000"+ - "\u0000\u0000\u00d1\u00cf\u0001\u0000\u0000\u0000\u00d2\u00d4\u0003\u0010"+ - "\b\u0000\u00d3\u00d5\u0005,\u0000\u0000\u00d4\u00d3\u0001\u0000\u0000"+ - "\u0000\u00d4\u00d5\u0001\u0000\u0000\u0000\u00d5\u00d6\u0001\u0000\u0000"+ - "\u0000\u00d6\u00d7\u0005*\u0000\u0000\u00d7\u00d8\u0003j5\u0000\u00d8"+ - "\u00e1\u0001\u0000\u0000\u0000\u00d9\u00db\u0003\u0010\b\u0000\u00da\u00dc"+ - "\u0005,\u0000\u0000\u00db\u00da\u0001\u0000\u0000\u0000\u00db\u00dc\u0001"+ - "\u0000\u0000\u0000\u00dc\u00dd\u0001\u0000\u0000\u0000\u00dd\u00de\u0005"+ - "1\u0000\u0000\u00de\u00df\u0003j5\u0000\u00df\u00e1\u0001\u0000\u0000"+ - "\u0000\u00e0\u00d2\u0001\u0000\u0000\u0000\u00e0\u00d9\u0001\u0000\u0000"+ - "\u0000\u00e1\r\u0001\u0000\u0000\u0000\u00e2\u00e3\u0003:\u001d\u0000"+ - "\u00e3\u00e4\u0005\u0018\u0000\u0000\u00e4\u00e5\u0003D\"\u0000\u00e5"+ - "\u000f\u0001\u0000\u0000\u0000\u00e6\u00ec\u0003\u0012\t\u0000\u00e7\u00e8"+ - "\u0003\u0012\t\u0000\u00e8\u00e9\u0003l6\u0000\u00e9\u00ea\u0003\u0012"+ - "\t\u0000\u00ea\u00ec\u0001\u0000\u0000\u0000\u00eb\u00e6\u0001\u0000\u0000"+ - "\u0000\u00eb\u00e7\u0001\u0000\u0000\u0000\u00ec\u0011\u0001\u0000\u0000"+ - "\u0000\u00ed\u00ee\u0006\t\uffff\uffff\u0000\u00ee\u00f2\u0003\u0014\n"+ - "\u0000\u00ef\u00f0\u0007\u0000\u0000\u0000\u00f0\u00f2\u0003\u0012\t\u0003"+ - "\u00f1\u00ed\u0001\u0000\u0000\u0000\u00f1\u00ef\u0001\u0000\u0000\u0000"+ - "\u00f2\u00fb\u0001\u0000\u0000\u0000\u00f3\u00f4\n\u0002\u0000\u0000\u00f4"+ - "\u00f5\u0007\u0001\u0000\u0000\u00f5\u00fa\u0003\u0012\t\u0003\u00f6\u00f7"+ - "\n\u0001\u0000\u0000\u00f7\u00f8\u0007\u0000\u0000\u0000\u00f8\u00fa\u0003"+ - "\u0012\t\u0002\u00f9\u00f3\u0001\u0000\u0000\u0000\u00f9\u00f6\u0001\u0000"+ - "\u0000\u0000\u00fa\u00fd\u0001\u0000\u0000\u0000\u00fb\u00f9\u0001\u0000"+ - "\u0000\u0000\u00fb\u00fc\u0001\u0000\u0000\u0000\u00fc\u0013\u0001\u0000"+ - "\u0000\u0000\u00fd\u00fb\u0001\u0000\u0000\u0000\u00fe\u00ff\u0006\n\uffff"+ - "\uffff\u0000\u00ff\u0107\u0003D\"\u0000\u0100\u0107\u0003:\u001d\u0000"+ - "\u0101\u0107\u0003\u0016\u000b\u0000\u0102\u0103\u0005+\u0000\u0000\u0103"+ - "\u0104\u0003\n\u0005\u0000\u0104\u0105\u00052\u0000\u0000\u0105\u0107"+ - "\u0001\u0000\u0000\u0000\u0106\u00fe\u0001\u0000\u0000\u0000\u0106\u0100"+ - "\u0001\u0000\u0000\u0000\u0106\u0101\u0001\u0000\u0000\u0000\u0106\u0102"+ - "\u0001\u0000\u0000\u0000\u0107\u010d\u0001\u0000\u0000\u0000\u0108\u0109"+ - "\n\u0001\u0000\u0000\u0109\u010a\u0005!\u0000\u0000\u010a\u010c\u0003"+ - "\u001a\r\u0000\u010b\u0108\u0001\u0000\u0000\u0000\u010c\u010f\u0001\u0000"+ - "\u0000\u0000\u010d\u010b\u0001\u0000\u0000\u0000\u010d\u010e\u0001\u0000"+ - "\u0000\u0000\u010e\u0015\u0001\u0000\u0000\u0000\u010f\u010d\u0001\u0000"+ - "\u0000\u0000\u0110\u0111\u0003\u0018\f\u0000\u0111\u011b\u0005+\u0000"+ - "\u0000\u0112\u011c\u0005=\u0000\u0000\u0113\u0118\u0003\n\u0005\u0000"+ - "\u0114\u0115\u0005\"\u0000\u0000\u0115\u0117\u0003\n\u0005\u0000\u0116"+ - "\u0114\u0001\u0000\u0000\u0000\u0117\u011a\u0001\u0000\u0000\u0000\u0118"+ - "\u0116\u0001\u0000\u0000\u0000\u0118\u0119\u0001\u0000\u0000\u0000\u0119"+ - "\u011c\u0001\u0000\u0000\u0000\u011a\u0118\u0001\u0000\u0000\u0000\u011b"+ - "\u0112\u0001\u0000\u0000\u0000\u011b\u0113\u0001\u0000\u0000\u0000\u011b"+ - "\u011c\u0001\u0000\u0000\u0000\u011c\u011d\u0001\u0000\u0000\u0000\u011d"+ - 
"\u011e\u00052\u0000\u0000\u011e\u0017\u0001\u0000\u0000\u0000\u011f\u0120"+ - "\u0003H$\u0000\u0120\u0019\u0001\u0000\u0000\u0000\u0121\u0122\u0003@"+ - " \u0000\u0122\u001b\u0001\u0000\u0000\u0000\u0123\u0124\u0005\f\u0000"+ - "\u0000\u0124\u0125\u0003\u001e\u000f\u0000\u0125\u001d\u0001\u0000\u0000"+ - "\u0000\u0126\u012b\u0003 \u0010\u0000\u0127\u0128\u0005\"\u0000\u0000"+ - "\u0128\u012a\u0003 \u0010\u0000\u0129\u0127\u0001\u0000\u0000\u0000\u012a"+ - "\u012d\u0001\u0000\u0000\u0000\u012b\u0129\u0001\u0000\u0000\u0000\u012b"+ - "\u012c\u0001\u0000\u0000\u0000\u012c\u001f\u0001\u0000\u0000\u0000\u012d"+ - "\u012b\u0001\u0000\u0000\u0000\u012e\u012f\u0003:\u001d\u0000\u012f\u0130"+ - "\u0005 \u0000\u0000\u0130\u0132\u0001\u0000\u0000\u0000\u0131\u012e\u0001"+ - "\u0000\u0000\u0000\u0131\u0132\u0001\u0000\u0000\u0000\u0132\u0133\u0001"+ - "\u0000\u0000\u0000\u0133\u0134\u0003\n\u0005\u0000\u0134!\u0001\u0000"+ - "\u0000\u0000\u0135\u0136\u0005\u0006\u0000\u0000\u0136\u013b\u0003$\u0012"+ - "\u0000\u0137\u0138\u0005\"\u0000\u0000\u0138\u013a\u0003$\u0012\u0000"+ - "\u0139\u0137\u0001\u0000\u0000\u0000\u013a\u013d\u0001\u0000\u0000\u0000"+ - "\u013b\u0139\u0001\u0000\u0000\u0000\u013b\u013c\u0001\u0000\u0000\u0000"+ - "\u013c\u013f\u0001\u0000\u0000\u0000\u013d\u013b\u0001\u0000\u0000\u0000"+ - "\u013e\u0140\u0003*\u0015\u0000\u013f\u013e\u0001\u0000\u0000\u0000\u013f"+ - "\u0140\u0001\u0000\u0000\u0000\u0140#\u0001\u0000\u0000\u0000\u0141\u0142"+ - "\u0003&\u0013\u0000\u0142\u0143\u0005\u0018\u0000\u0000\u0143\u0145\u0001"+ - "\u0000\u0000\u0000\u0144\u0141\u0001\u0000\u0000\u0000\u0144\u0145\u0001"+ - "\u0000\u0000\u0000\u0145\u0146\u0001\u0000\u0000\u0000\u0146\u0147\u0003"+ - "(\u0014\u0000\u0147%\u0001\u0000\u0000\u0000\u0148\u0149\u0005L\u0000"+ - "\u0000\u0149\'\u0001\u0000\u0000\u0000\u014a\u014b\u0007\u0002\u0000\u0000"+ - "\u014b)\u0001\u0000\u0000\u0000\u014c\u014f\u0003,\u0016\u0000\u014d\u014f"+ - "\u0003.\u0017\u0000\u014e\u014c\u0001\u0000\u0000\u0000\u014e\u014d\u0001"+ - "\u0000\u0000\u0000\u014f+\u0001\u0000\u0000\u0000\u0150\u0151\u0005K\u0000"+ - "\u0000\u0151\u0156\u0005L\u0000\u0000\u0152\u0153\u0005\"\u0000\u0000"+ - "\u0153\u0155\u0005L\u0000\u0000\u0154\u0152\u0001\u0000\u0000\u0000\u0155"+ - "\u0158\u0001\u0000\u0000\u0000\u0156\u0154\u0001\u0000\u0000\u0000\u0156"+ - "\u0157\u0001\u0000\u0000\u0000\u0157-\u0001\u0000\u0000\u0000\u0158\u0156"+ - "\u0001\u0000\u0000\u0000\u0159\u015a\u0005A\u0000\u0000\u015a\u015b\u0003"+ - ",\u0016\u0000\u015b\u015c\u0005B\u0000\u0000\u015c/\u0001\u0000\u0000"+ - "\u0000\u015d\u015e\u0005\u0013\u0000\u0000\u015e\u0163\u0003$\u0012\u0000"+ - "\u015f\u0160\u0005\"\u0000\u0000\u0160\u0162\u0003$\u0012\u0000\u0161"+ - "\u015f\u0001\u0000\u0000\u0000\u0162\u0165\u0001\u0000\u0000\u0000\u0163"+ - "\u0161\u0001\u0000\u0000\u0000\u0163\u0164\u0001\u0000\u0000\u0000\u0164"+ - "\u0167\u0001\u0000\u0000\u0000\u0165\u0163\u0001\u0000\u0000\u0000\u0166"+ - "\u0168\u00036\u001b\u0000\u0167\u0166\u0001\u0000\u0000\u0000\u0167\u0168"+ - "\u0001\u0000\u0000\u0000\u0168\u016b\u0001\u0000\u0000\u0000\u0169\u016a"+ - "\u0005\u001d\u0000\u0000\u016a\u016c\u0003\u001e\u000f\u0000\u016b\u0169"+ - "\u0001\u0000\u0000\u0000\u016b\u016c\u0001\u0000\u0000\u0000\u016c1\u0001"+ - "\u0000\u0000\u0000\u016d\u016e\u0005\u0004\u0000\u0000\u016e\u016f\u0003"+ - "\u001e\u000f\u0000\u016f3\u0001\u0000\u0000\u0000\u0170\u0172\u0005\u000f"+ - "\u0000\u0000\u0171\u0173\u00036\u001b\u0000\u0172\u0171\u0001\u0000\u0000"+ - 
"\u0000\u0172\u0173\u0001\u0000\u0000\u0000\u0173\u0176\u0001\u0000\u0000"+ - "\u0000\u0174\u0175\u0005\u001d\u0000\u0000\u0175\u0177\u0003\u001e\u000f"+ - "\u0000\u0176\u0174\u0001\u0000\u0000\u0000\u0176\u0177\u0001\u0000\u0000"+ - "\u0000\u01775\u0001\u0000\u0000\u0000\u0178\u017d\u00038\u001c\u0000\u0179"+ - "\u017a\u0005\"\u0000\u0000\u017a\u017c\u00038\u001c\u0000\u017b\u0179"+ - "\u0001\u0000\u0000\u0000\u017c\u017f\u0001\u0000\u0000\u0000\u017d\u017b"+ - "\u0001\u0000\u0000\u0000\u017d\u017e\u0001\u0000\u0000\u0000\u017e7\u0001"+ - "\u0000\u0000\u0000\u017f\u017d\u0001\u0000\u0000\u0000\u0180\u0183\u0003"+ - " \u0010\u0000\u0181\u0182\u0005\u0010\u0000\u0000\u0182\u0184\u0003\n"+ - "\u0005\u0000\u0183\u0181\u0001\u0000\u0000\u0000\u0183\u0184\u0001\u0000"+ - "\u0000\u0000\u01849\u0001\u0000\u0000\u0000\u0185\u018a\u0003H$\u0000"+ - "\u0186\u0187\u0005$\u0000\u0000\u0187\u0189\u0003H$\u0000\u0188\u0186"+ - "\u0001\u0000\u0000\u0000\u0189\u018c\u0001\u0000\u0000\u0000\u018a\u0188"+ - "\u0001\u0000\u0000\u0000\u018a\u018b\u0001\u0000\u0000\u0000\u018b;\u0001"+ - "\u0000\u0000\u0000\u018c\u018a\u0001\u0000\u0000\u0000\u018d\u0192\u0003"+ - "B!\u0000\u018e\u018f\u0005$\u0000\u0000\u018f\u0191\u0003B!\u0000\u0190"+ - "\u018e\u0001\u0000\u0000\u0000\u0191\u0194\u0001\u0000\u0000\u0000\u0192"+ - "\u0190\u0001\u0000\u0000\u0000\u0192\u0193\u0001\u0000\u0000\u0000\u0193"+ - "=\u0001\u0000\u0000\u0000\u0194\u0192\u0001\u0000\u0000\u0000\u0195\u019a"+ - "\u0003<\u001e\u0000\u0196\u0197\u0005\"\u0000\u0000\u0197\u0199\u0003"+ - "<\u001e\u0000\u0198\u0196\u0001\u0000\u0000\u0000\u0199\u019c\u0001\u0000"+ - "\u0000\u0000\u019a\u0198\u0001\u0000\u0000\u0000\u019a\u019b\u0001\u0000"+ - "\u0000\u0000\u019b?\u0001\u0000\u0000\u0000\u019c\u019a\u0001\u0000\u0000"+ - "\u0000\u019d\u019e\u0007\u0003\u0000\u0000\u019eA\u0001\u0000\u0000\u0000"+ - "\u019f\u01a3\u0005P\u0000\u0000\u01a0\u01a1\u0004!\n\u0000\u01a1\u01a3"+ - "\u0003F#\u0000\u01a2\u019f\u0001\u0000\u0000\u0000\u01a2\u01a0\u0001\u0000"+ - "\u0000\u0000\u01a3C\u0001\u0000\u0000\u0000\u01a4\u01cf\u0005-\u0000\u0000"+ - "\u01a5\u01a6\u0003h4\u0000\u01a6\u01a7\u0005C\u0000\u0000\u01a7\u01cf"+ - "\u0001\u0000\u0000\u0000\u01a8\u01cf\u0003f3\u0000\u01a9\u01cf\u0003h"+ - "4\u0000\u01aa\u01cf\u0003b1\u0000\u01ab\u01cf\u0003F#\u0000\u01ac\u01cf"+ - "\u0003j5\u0000\u01ad\u01ae\u0005A\u0000\u0000\u01ae\u01b3\u0003d2\u0000"+ - "\u01af\u01b0\u0005\"\u0000\u0000\u01b0\u01b2\u0003d2\u0000\u01b1\u01af"+ - "\u0001\u0000\u0000\u0000\u01b2\u01b5\u0001\u0000\u0000\u0000\u01b3\u01b1"+ - "\u0001\u0000\u0000\u0000\u01b3\u01b4\u0001\u0000\u0000\u0000\u01b4\u01b6"+ - "\u0001\u0000\u0000\u0000\u01b5\u01b3\u0001\u0000\u0000\u0000\u01b6\u01b7"+ - "\u0005B\u0000\u0000\u01b7\u01cf\u0001\u0000\u0000\u0000\u01b8\u01b9\u0005"+ - "A\u0000\u0000\u01b9\u01be\u0003b1\u0000\u01ba\u01bb\u0005\"\u0000\u0000"+ - "\u01bb\u01bd\u0003b1\u0000\u01bc\u01ba\u0001\u0000\u0000\u0000\u01bd\u01c0"+ - "\u0001\u0000\u0000\u0000\u01be\u01bc\u0001\u0000\u0000\u0000\u01be\u01bf"+ - "\u0001\u0000\u0000\u0000\u01bf\u01c1\u0001\u0000\u0000\u0000\u01c0\u01be"+ - "\u0001\u0000\u0000\u0000\u01c1\u01c2\u0005B\u0000\u0000\u01c2\u01cf\u0001"+ - "\u0000\u0000\u0000\u01c3\u01c4\u0005A\u0000\u0000\u01c4\u01c9\u0003j5"+ - "\u0000\u01c5\u01c6\u0005\"\u0000\u0000\u01c6\u01c8\u0003j5\u0000\u01c7"+ - "\u01c5\u0001\u0000\u0000\u0000\u01c8\u01cb\u0001\u0000\u0000\u0000\u01c9"+ - "\u01c7\u0001\u0000\u0000\u0000\u01c9\u01ca\u0001\u0000\u0000\u0000\u01ca"+ - 
"\u01cc\u0001\u0000\u0000\u0000\u01cb\u01c9\u0001\u0000\u0000\u0000\u01cc"+ - "\u01cd\u0005B\u0000\u0000\u01cd\u01cf\u0001\u0000\u0000\u0000\u01ce\u01a4"+ - "\u0001\u0000\u0000\u0000\u01ce\u01a5\u0001\u0000\u0000\u0000\u01ce\u01a8"+ - "\u0001\u0000\u0000\u0000\u01ce\u01a9\u0001\u0000\u0000\u0000\u01ce\u01aa"+ - "\u0001\u0000\u0000\u0000\u01ce\u01ab\u0001\u0000\u0000\u0000\u01ce\u01ac"+ - "\u0001\u0000\u0000\u0000\u01ce\u01ad\u0001\u0000\u0000\u0000\u01ce\u01b8"+ - "\u0001\u0000\u0000\u0000\u01ce\u01c3\u0001\u0000\u0000\u0000\u01cfE\u0001"+ - "\u0000\u0000\u0000\u01d0\u01d3\u00050\u0000\u0000\u01d1\u01d3\u0005@\u0000"+ - "\u0000\u01d2\u01d0\u0001\u0000\u0000\u0000\u01d2\u01d1\u0001\u0000\u0000"+ - "\u0000\u01d3G\u0001\u0000\u0000\u0000\u01d4\u01d8\u0003@ \u0000\u01d5"+ - "\u01d6\u0004$\u000b\u0000\u01d6\u01d8\u0003F#\u0000\u01d7\u01d4\u0001"+ - "\u0000\u0000\u0000\u01d7\u01d5\u0001\u0000\u0000\u0000\u01d8I\u0001\u0000"+ - "\u0000\u0000\u01d9\u01da\u0005\t\u0000\u0000\u01da\u01db\u0005\u001b\u0000"+ - "\u0000\u01dbK\u0001\u0000\u0000\u0000\u01dc\u01dd\u0005\u000e\u0000\u0000"+ - "\u01dd\u01e2\u0003N\'\u0000\u01de\u01df\u0005\"\u0000\u0000\u01df\u01e1"+ - "\u0003N\'\u0000\u01e0\u01de\u0001\u0000\u0000\u0000\u01e1\u01e4\u0001"+ - "\u0000\u0000\u0000\u01e2\u01e0\u0001\u0000\u0000\u0000\u01e2\u01e3\u0001"+ - "\u0000\u0000\u0000\u01e3M\u0001\u0000\u0000\u0000\u01e4\u01e2\u0001\u0000"+ - "\u0000\u0000\u01e5\u01e7\u0003\n\u0005\u0000\u01e6\u01e8\u0007\u0004\u0000"+ - "\u0000\u01e7\u01e6\u0001\u0000\u0000\u0000\u01e7\u01e8\u0001\u0000\u0000"+ - "\u0000\u01e8\u01eb\u0001\u0000\u0000\u0000\u01e9\u01ea\u0005.\u0000\u0000"+ - "\u01ea\u01ec\u0007\u0005\u0000\u0000\u01eb\u01e9\u0001\u0000\u0000\u0000"+ - "\u01eb\u01ec\u0001\u0000\u0000\u0000\u01ecO\u0001\u0000\u0000\u0000\u01ed"+ - "\u01ee\u0005\b\u0000\u0000\u01ee\u01ef\u0003>\u001f\u0000\u01efQ\u0001"+ - "\u0000\u0000\u0000\u01f0\u01f1\u0005\u0002\u0000\u0000\u01f1\u01f2\u0003"+ - ">\u001f\u0000\u01f2S\u0001\u0000\u0000\u0000\u01f3\u01f4\u0005\u000b\u0000"+ - "\u0000\u01f4\u01f9\u0003V+\u0000\u01f5\u01f6\u0005\"\u0000\u0000\u01f6"+ - "\u01f8\u0003V+\u0000\u01f7\u01f5\u0001\u0000\u0000\u0000\u01f8\u01fb\u0001"+ - "\u0000\u0000\u0000\u01f9\u01f7\u0001\u0000\u0000\u0000\u01f9\u01fa\u0001"+ - "\u0000\u0000\u0000\u01faU\u0001\u0000\u0000\u0000\u01fb\u01f9\u0001\u0000"+ - "\u0000\u0000\u01fc\u01fd\u0003<\u001e\u0000\u01fd\u01fe\u0005T\u0000\u0000"+ - "\u01fe\u01ff\u0003<\u001e\u0000\u01ffW\u0001\u0000\u0000\u0000\u0200\u0201"+ - "\u0005\u0001\u0000\u0000\u0201\u0202\u0003\u0014\n\u0000\u0202\u0204\u0003"+ - "j5\u0000\u0203\u0205\u0003^/\u0000\u0204\u0203\u0001\u0000\u0000\u0000"+ - "\u0204\u0205\u0001\u0000\u0000\u0000\u0205Y\u0001\u0000\u0000\u0000\u0206"+ - "\u0207\u0005\u0007\u0000\u0000\u0207\u0208\u0003\u0014\n\u0000\u0208\u0209"+ - "\u0003j5\u0000\u0209[\u0001\u0000\u0000\u0000\u020a\u020b\u0005\n\u0000"+ - "\u0000\u020b\u020c\u0003:\u001d\u0000\u020c]\u0001\u0000\u0000\u0000\u020d"+ - "\u0212\u0003`0\u0000\u020e\u020f\u0005\"\u0000\u0000\u020f\u0211\u0003"+ - "`0\u0000\u0210\u020e\u0001\u0000\u0000\u0000\u0211\u0214\u0001\u0000\u0000"+ - "\u0000\u0212\u0210\u0001\u0000\u0000\u0000\u0212\u0213\u0001\u0000\u0000"+ - "\u0000\u0213_\u0001\u0000\u0000\u0000\u0214\u0212\u0001\u0000\u0000\u0000"+ - "\u0215\u0216\u0003@ \u0000\u0216\u0217\u0005 \u0000\u0000\u0217\u0218"+ - "\u0003D\"\u0000\u0218a\u0001\u0000\u0000\u0000\u0219\u021a\u0007\u0006"+ - "\u0000\u0000\u021ac\u0001\u0000\u0000\u0000\u021b\u021e\u0003f3\u0000"+ - 
"\u021c\u021e\u0003h4\u0000\u021d\u021b\u0001\u0000\u0000\u0000\u021d\u021c"+ - "\u0001\u0000\u0000\u0000\u021ee\u0001\u0000\u0000\u0000\u021f\u0221\u0007"+ - "\u0000\u0000\u0000\u0220\u021f\u0001\u0000\u0000\u0000\u0220\u0221\u0001"+ - "\u0000\u0000\u0000\u0221\u0222\u0001\u0000\u0000\u0000\u0222\u0223\u0005"+ - "\u001c\u0000\u0000\u0223g\u0001\u0000\u0000\u0000\u0224\u0226\u0007\u0000"+ - "\u0000\u0000\u0225\u0224\u0001\u0000\u0000\u0000\u0225\u0226\u0001\u0000"+ - "\u0000\u0000\u0226\u0227\u0001\u0000\u0000\u0000\u0227\u0228\u0005\u001b"+ - "\u0000\u0000\u0228i\u0001\u0000\u0000\u0000\u0229\u022a\u0005\u001a\u0000"+ - "\u0000\u022ak\u0001\u0000\u0000\u0000\u022b\u022c\u0007\u0007\u0000\u0000"+ - "\u022cm\u0001\u0000\u0000\u0000\u022d\u022e\u0005\u0005\u0000\u0000\u022e"+ - "\u022f\u0003p8\u0000\u022fo\u0001\u0000\u0000\u0000\u0230\u0231\u0005"+ - "A\u0000\u0000\u0231\u0232\u0003\u0002\u0001\u0000\u0232\u0233\u0005B\u0000"+ - "\u0000\u0233q\u0001\u0000\u0000\u0000\u0234\u0235\u0005\r\u0000\u0000"+ - "\u0235\u0236\u0005d\u0000\u0000\u0236s\u0001\u0000\u0000\u0000\u0237\u0238"+ - "\u0005\u0003\u0000\u0000\u0238\u023b\u0005Z\u0000\u0000\u0239\u023a\u0005"+ - "X\u0000\u0000\u023a\u023c\u0003<\u001e\u0000\u023b\u0239\u0001\u0000\u0000"+ - "\u0000\u023b\u023c\u0001\u0000\u0000\u0000\u023c\u0246\u0001\u0000\u0000"+ - "\u0000\u023d\u023e\u0005Y\u0000\u0000\u023e\u0243\u0003v;\u0000\u023f"+ - "\u0240\u0005\"\u0000\u0000\u0240\u0242\u0003v;\u0000\u0241\u023f\u0001"+ - "\u0000\u0000\u0000\u0242\u0245\u0001\u0000\u0000\u0000\u0243\u0241\u0001"+ - "\u0000\u0000\u0000\u0243\u0244\u0001\u0000\u0000\u0000\u0244\u0247\u0001"+ - "\u0000\u0000\u0000\u0245\u0243\u0001\u0000\u0000\u0000\u0246\u023d\u0001"+ - "\u0000\u0000\u0000\u0246\u0247\u0001\u0000\u0000\u0000\u0247u\u0001\u0000"+ - "\u0000\u0000\u0248\u0249\u0003<\u001e\u0000\u0249\u024a\u0005 \u0000\u0000"+ - "\u024a\u024c\u0001\u0000\u0000\u0000\u024b\u0248\u0001\u0000\u0000\u0000"+ - "\u024b\u024c\u0001\u0000\u0000\u0000\u024c\u024d\u0001\u0000\u0000\u0000"+ - "\u024d\u024e\u0003<\u001e\u0000\u024ew\u0001\u0000\u0000\u0000\u024f\u0250"+ - "\u0005\u0012\u0000\u0000\u0250\u0251\u0003$\u0012\u0000\u0251\u0252\u0005"+ - "X\u0000\u0000\u0252\u0253\u0003>\u001f\u0000\u0253y\u0001\u0000\u0000"+ - "\u0000\u0254\u0255\u0005\u0011\u0000\u0000\u0255\u0258\u00036\u001b\u0000"+ - "\u0256\u0257\u0005\u001d\u0000\u0000\u0257\u0259\u0003\u001e\u000f\u0000"+ - "\u0258\u0256\u0001\u0000\u0000\u0000\u0258\u0259\u0001\u0000\u0000\u0000"+ - "\u0259{\u0001\u0000\u0000\u0000:\u0087\u0090\u00a2\u00ae\u00b7\u00bf\u00c5"+ - "\u00cd\u00cf\u00d4\u00db\u00e0\u00eb\u00f1\u00f9\u00fb\u0106\u010d\u0118"+ - "\u011b\u012b\u0131\u013b\u013f\u0144\u014e\u0156\u0163\u0167\u016b\u0172"+ - "\u0176\u017d\u0183\u018a\u0192\u019a\u01a2\u01b3\u01be\u01c9\u01ce\u01d2"+ - "\u01d7\u01e2\u01e7\u01eb\u01f9\u0204\u0212\u021d\u0220\u0225\u023b\u0243"+ - "\u0246\u024b\u0258"; + "\u0004\u0001\u0080\u027b\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001"+ + "\u0002\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004\u0007\u0004"+ + "\u0002\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007\u0007\u0007"+ + "\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b\u0007\u000b"+ + "\u0002\f\u0007\f\u0002\r\u0007\r\u0002\u000e\u0007\u000e\u0002\u000f\u0007"+ + "\u000f\u0002\u0010\u0007\u0010\u0002\u0011\u0007\u0011\u0002\u0012\u0007"+ + "\u0012\u0002\u0013\u0007\u0013\u0002\u0014\u0007\u0014\u0002\u0015\u0007"+ + "\u0015\u0002\u0016\u0007\u0016\u0002\u0017\u0007\u0017\u0002\u0018\u0007"+ + 
"\u0018\u0002\u0019\u0007\u0019\u0002\u001a\u0007\u001a\u0002\u001b\u0007"+ + "\u001b\u0002\u001c\u0007\u001c\u0002\u001d\u0007\u001d\u0002\u001e\u0007"+ + "\u001e\u0002\u001f\u0007\u001f\u0002 \u0007 \u0002!\u0007!\u0002\"\u0007"+ + "\"\u0002#\u0007#\u0002$\u0007$\u0002%\u0007%\u0002&\u0007&\u0002\'\u0007"+ + "\'\u0002(\u0007(\u0002)\u0007)\u0002*\u0007*\u0002+\u0007+\u0002,\u0007"+ + ",\u0002-\u0007-\u0002.\u0007.\u0002/\u0007/\u00020\u00070\u00021\u0007"+ + "1\u00022\u00072\u00023\u00073\u00024\u00074\u00025\u00075\u00026\u0007"+ + "6\u00027\u00077\u00028\u00078\u00029\u00079\u0002:\u0007:\u0002;\u0007"+ + ";\u0002<\u0007<\u0002=\u0007=\u0002>\u0007>\u0002?\u0007?\u0002@\u0007"+ + "@\u0002A\u0007A\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0001\u0001"+ + "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0005\u0001\u008e"+ + "\b\u0001\n\u0001\f\u0001\u0091\t\u0001\u0001\u0002\u0001\u0002\u0001\u0002"+ + "\u0001\u0002\u0001\u0002\u0001\u0002\u0003\u0002\u0099\b\u0002\u0001\u0003"+ + "\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003"+ + "\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003"+ + "\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0003\u0003"+ + "\u00ad\b\u0003\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0005\u0001\u0005"+ + "\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0003\u0005"+ + "\u00b9\b\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005"+ + "\u0005\u0005\u00c0\b\u0005\n\u0005\f\u0005\u00c3\t\u0005\u0001\u0005\u0001"+ + "\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0003\u0005\u00ca\b\u0005\u0001"+ + "\u0005\u0001\u0005\u0001\u0005\u0003\u0005\u00cf\b\u0005\u0001\u0005\u0001"+ + "\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0005\u0005\u00d7"+ + "\b\u0005\n\u0005\f\u0005\u00da\t\u0005\u0001\u0006\u0001\u0006\u0003\u0006"+ + "\u00de\b\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006"+ + "\u0003\u0006\u00e5\b\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0003\u0006"+ + "\u00ea\b\u0006\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007\u0001\b"+ + "\u0001\b\u0001\b\u0001\b\u0001\b\u0003\b\u00f5\b\b\u0001\t\u0001\t\u0001"+ + "\t\u0001\t\u0003\t\u00fb\b\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001"+ + "\t\u0005\t\u0103\b\t\n\t\f\t\u0106\t\t\u0001\n\u0001\n\u0001\n\u0001\n"+ + "\u0001\n\u0001\n\u0001\n\u0001\n\u0003\n\u0110\b\n\u0001\n\u0001\n\u0001"+ + "\n\u0005\n\u0115\b\n\n\n\f\n\u0118\t\n\u0001\u000b\u0001\u000b\u0001\u000b"+ + "\u0001\u000b\u0001\u000b\u0001\u000b\u0005\u000b\u0120\b\u000b\n\u000b"+ + "\f\u000b\u0123\t\u000b\u0003\u000b\u0125\b\u000b\u0001\u000b\u0001\u000b"+ + "\u0001\f\u0001\f\u0001\r\u0001\r\u0001\u000e\u0001\u000e\u0001\u000e\u0001"+ + "\u000f\u0001\u000f\u0001\u000f\u0005\u000f\u0133\b\u000f\n\u000f\f\u000f"+ + "\u0136\t\u000f\u0001\u0010\u0001\u0010\u0001\u0010\u0003\u0010\u013b\b"+ + "\u0010\u0001\u0010\u0001\u0010\u0001\u0011\u0001\u0011\u0001\u0011\u0001"+ + "\u0011\u0005\u0011\u0143\b\u0011\n\u0011\f\u0011\u0146\t\u0011\u0001\u0011"+ + "\u0003\u0011\u0149\b\u0011\u0001\u0012\u0001\u0012\u0001\u0012\u0003\u0012"+ + "\u014e\b\u0012\u0001\u0012\u0001\u0012\u0001\u0013\u0001\u0013\u0001\u0014"+ + "\u0001\u0014\u0001\u0015\u0001\u0015\u0003\u0015\u0158\b\u0015\u0001\u0016"+ + "\u0001\u0016\u0001\u0016\u0001\u0016\u0005\u0016\u015e\b\u0016\n\u0016"+ + "\f\u0016\u0161\t\u0016\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017"+ + "\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0005\u0018\u016b\b\u0018"+ + 
"\n\u0018\f\u0018\u016e\t\u0018\u0001\u0018\u0003\u0018\u0171\b\u0018\u0001"+ + "\u0018\u0001\u0018\u0003\u0018\u0175\b\u0018\u0001\u0019\u0001\u0019\u0001"+ + "\u0019\u0001\u001a\u0001\u001a\u0003\u001a\u017c\b\u001a\u0001\u001a\u0001"+ + "\u001a\u0003\u001a\u0180\b\u001a\u0001\u001b\u0001\u001b\u0001\u001b\u0005"+ + "\u001b\u0185\b\u001b\n\u001b\f\u001b\u0188\t\u001b\u0001\u001c\u0001\u001c"+ + "\u0001\u001c\u0003\u001c\u018d\b\u001c\u0001\u001d\u0001\u001d\u0001\u001d"+ + "\u0005\u001d\u0192\b\u001d\n\u001d\f\u001d\u0195\t\u001d\u0001\u001e\u0001"+ + "\u001e\u0001\u001e\u0005\u001e\u019a\b\u001e\n\u001e\f\u001e\u019d\t\u001e"+ + "\u0001\u001f\u0001\u001f\u0001\u001f\u0005\u001f\u01a2\b\u001f\n\u001f"+ + "\f\u001f\u01a5\t\u001f\u0001 \u0001 \u0001!\u0001!\u0001!\u0003!\u01ac"+ + "\b!\u0001\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001"+ + "\"\u0001\"\u0001\"\u0001\"\u0001\"\u0005\"\u01bb\b\"\n\"\f\"\u01be\t\""+ + "\u0001\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001\"\u0005\"\u01c6\b\"\n\""+ + "\f\"\u01c9\t\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001\"\u0001\"\u0005\""+ + "\u01d1\b\"\n\"\f\"\u01d4\t\"\u0001\"\u0001\"\u0003\"\u01d8\b\"\u0001#"+ + "\u0001#\u0003#\u01dc\b#\u0001$\u0001$\u0001$\u0003$\u01e1\b$\u0001%\u0001"+ + "%\u0001%\u0001&\u0001&\u0001&\u0001&\u0005&\u01ea\b&\n&\f&\u01ed\t&\u0001"+ + "\'\u0001\'\u0003\'\u01f1\b\'\u0001\'\u0001\'\u0003\'\u01f5\b\'\u0001("+ + "\u0001(\u0001(\u0001)\u0001)\u0001)\u0001*\u0001*\u0001*\u0001*\u0005"+ + "*\u0201\b*\n*\f*\u0204\t*\u0001+\u0001+\u0001+\u0001+\u0001,\u0001,\u0001"+ + ",\u0001,\u0003,\u020e\b,\u0001-\u0001-\u0001-\u0001-\u0001.\u0001.\u0001"+ + ".\u0001/\u0001/\u0001/\u0005/\u021a\b/\n/\f/\u021d\t/\u00010\u00010\u0001"+ + "0\u00010\u00011\u00011\u00012\u00012\u00032\u0227\b2\u00013\u00033\u022a"+ + "\b3\u00013\u00013\u00014\u00034\u022f\b4\u00014\u00014\u00015\u00015\u0001"+ + "6\u00016\u00017\u00017\u00017\u00018\u00018\u00018\u00018\u00019\u0001"+ + "9\u00019\u0001:\u0001:\u0001:\u0001:\u0003:\u0245\b:\u0001:\u0001:\u0001"+ + ":\u0001:\u0005:\u024b\b:\n:\f:\u024e\t:\u0003:\u0250\b:\u0001;\u0001;"+ + "\u0001;\u0003;\u0255\b;\u0001;\u0001;\u0001<\u0001<\u0001<\u0001<\u0001"+ + "<\u0001=\u0001=\u0001=\u0001=\u0003=\u0262\b=\u0001>\u0003>\u0265\b>\u0001"+ + ">\u0001>\u0001>\u0001>\u0001?\u0001?\u0001?\u0003?\u026e\b?\u0001@\u0001"+ + "@\u0001@\u0001@\u0005@\u0274\b@\n@\f@\u0277\t@\u0001A\u0001A\u0001A\u0000"+ + "\u0004\u0002\n\u0012\u0014B\u0000\u0002\u0004\u0006\b\n\f\u000e\u0010"+ + "\u0012\u0014\u0016\u0018\u001a\u001c\u001e \"$&(*,.02468:<>@BDFHJLNPR"+ + "TVXZ\\^`bdfhjlnprtvxz|~\u0080\u0082\u0000\t\u0001\u0000@A\u0001\u0000"+ + "BD\u0002\u0000\u001e\u001eQQ\u0001\u0000HI\u0002\u0000##((\u0002\u0000"+ + "++..\u0002\u0000**88\u0002\u000099;?\u0001\u0000\u0016\u0018\u0294\u0000"+ + "\u0084\u0001\u0000\u0000\u0000\u0002\u0087\u0001\u0000\u0000\u0000\u0004"+ + "\u0098\u0001\u0000\u0000\u0000\u0006\u00ac\u0001\u0000\u0000\u0000\b\u00ae"+ + "\u0001\u0000\u0000\u0000\n\u00ce\u0001\u0000\u0000\u0000\f\u00e9\u0001"+ + "\u0000\u0000\u0000\u000e\u00eb\u0001\u0000\u0000\u0000\u0010\u00f4\u0001"+ + "\u0000\u0000\u0000\u0012\u00fa\u0001\u0000\u0000\u0000\u0014\u010f\u0001"+ + "\u0000\u0000\u0000\u0016\u0119\u0001\u0000\u0000\u0000\u0018\u0128\u0001"+ + "\u0000\u0000\u0000\u001a\u012a\u0001\u0000\u0000\u0000\u001c\u012c\u0001"+ + "\u0000\u0000\u0000\u001e\u012f\u0001\u0000\u0000\u0000 \u013a\u0001\u0000"+ + "\u0000\u0000\"\u013e\u0001\u0000\u0000\u0000$\u014d\u0001\u0000\u0000"+ + 
"\u0000&\u0151\u0001\u0000\u0000\u0000(\u0153\u0001\u0000\u0000\u0000*"+ + "\u0157\u0001\u0000\u0000\u0000,\u0159\u0001\u0000\u0000\u0000.\u0162\u0001"+ + "\u0000\u0000\u00000\u0166\u0001\u0000\u0000\u00002\u0176\u0001\u0000\u0000"+ + "\u00004\u0179\u0001\u0000\u0000\u00006\u0181\u0001\u0000\u0000\u00008"+ + "\u0189\u0001\u0000\u0000\u0000:\u018e\u0001\u0000\u0000\u0000<\u0196\u0001"+ + "\u0000\u0000\u0000>\u019e\u0001\u0000\u0000\u0000@\u01a6\u0001\u0000\u0000"+ + "\u0000B\u01ab\u0001\u0000\u0000\u0000D\u01d7\u0001\u0000\u0000\u0000F"+ + "\u01db\u0001\u0000\u0000\u0000H\u01e0\u0001\u0000\u0000\u0000J\u01e2\u0001"+ + "\u0000\u0000\u0000L\u01e5\u0001\u0000\u0000\u0000N\u01ee\u0001\u0000\u0000"+ + "\u0000P\u01f6\u0001\u0000\u0000\u0000R\u01f9\u0001\u0000\u0000\u0000T"+ + "\u01fc\u0001\u0000\u0000\u0000V\u0205\u0001\u0000\u0000\u0000X\u0209\u0001"+ + "\u0000\u0000\u0000Z\u020f\u0001\u0000\u0000\u0000\\\u0213\u0001\u0000"+ + "\u0000\u0000^\u0216\u0001\u0000\u0000\u0000`\u021e\u0001\u0000\u0000\u0000"+ + "b\u0222\u0001\u0000\u0000\u0000d\u0226\u0001\u0000\u0000\u0000f\u0229"+ + "\u0001\u0000\u0000\u0000h\u022e\u0001\u0000\u0000\u0000j\u0232\u0001\u0000"+ + "\u0000\u0000l\u0234\u0001\u0000\u0000\u0000n\u0236\u0001\u0000\u0000\u0000"+ + "p\u0239\u0001\u0000\u0000\u0000r\u023d\u0001\u0000\u0000\u0000t\u0240"+ + "\u0001\u0000\u0000\u0000v\u0254\u0001\u0000\u0000\u0000x\u0258\u0001\u0000"+ + "\u0000\u0000z\u025d\u0001\u0000\u0000\u0000|\u0264\u0001\u0000\u0000\u0000"+ + "~\u026a\u0001\u0000\u0000\u0000\u0080\u026f\u0001\u0000\u0000\u0000\u0082"+ + "\u0278\u0001\u0000\u0000\u0000\u0084\u0085\u0003\u0002\u0001\u0000\u0085"+ + "\u0086\u0005\u0000\u0000\u0001\u0086\u0001\u0001\u0000\u0000\u0000\u0087"+ + "\u0088\u0006\u0001\uffff\uffff\u0000\u0088\u0089\u0003\u0004\u0002\u0000"+ + "\u0089\u008f\u0001\u0000\u0000\u0000\u008a\u008b\n\u0001\u0000\u0000\u008b"+ + "\u008c\u0005\u001d\u0000\u0000\u008c\u008e\u0003\u0006\u0003\u0000\u008d"+ + "\u008a\u0001\u0000\u0000\u0000\u008e\u0091\u0001\u0000\u0000\u0000\u008f"+ + "\u008d\u0001\u0000\u0000\u0000\u008f\u0090\u0001\u0000\u0000\u0000\u0090"+ + "\u0003\u0001\u0000\u0000\u0000\u0091\u008f\u0001\u0000\u0000\u0000\u0092"+ + "\u0099\u0003n7\u0000\u0093\u0099\u0003\"\u0011\u0000\u0094\u0099\u0003"+ + "\u001c\u000e\u0000\u0095\u0099\u0003r9\u0000\u0096\u0097\u0004\u0002\u0001"+ + "\u0000\u0097\u0099\u00030\u0018\u0000\u0098\u0092\u0001\u0000\u0000\u0000"+ + "\u0098\u0093\u0001\u0000\u0000\u0000\u0098\u0094\u0001\u0000\u0000\u0000"+ + "\u0098\u0095\u0001\u0000\u0000\u0000\u0098\u0096\u0001\u0000\u0000\u0000"+ + "\u0099\u0005\u0001\u0000\u0000\u0000\u009a\u00ad\u00032\u0019\u0000\u009b"+ + "\u00ad\u0003\b\u0004\u0000\u009c\u00ad\u0003P(\u0000\u009d\u00ad\u0003"+ + "J%\u0000\u009e\u00ad\u00034\u001a\u0000\u009f\u00ad\u0003L&\u0000\u00a0"+ + "\u00ad\u0003R)\u0000\u00a1\u00ad\u0003T*\u0000\u00a2\u00ad\u0003X,\u0000"+ + "\u00a3\u00ad\u0003Z-\u0000\u00a4\u00ad\u0003t:\u0000\u00a5\u00ad\u0003"+ + "\\.\u0000\u00a6\u00a7\u0004\u0003\u0002\u0000\u00a7\u00ad\u0003z=\u0000"+ + "\u00a8\u00a9\u0004\u0003\u0003\u0000\u00a9\u00ad\u0003x<\u0000\u00aa\u00ab"+ + "\u0004\u0003\u0004\u0000\u00ab\u00ad\u0003|>\u0000\u00ac\u009a\u0001\u0000"+ + "\u0000\u0000\u00ac\u009b\u0001\u0000\u0000\u0000\u00ac\u009c\u0001\u0000"+ + "\u0000\u0000\u00ac\u009d\u0001\u0000\u0000\u0000\u00ac\u009e\u0001\u0000"+ + "\u0000\u0000\u00ac\u009f\u0001\u0000\u0000\u0000\u00ac\u00a0\u0001\u0000"+ + "\u0000\u0000\u00ac\u00a1\u0001\u0000\u0000\u0000\u00ac\u00a2\u0001\u0000"+ + 
"\u0000\u0000\u00ac\u00a3\u0001\u0000\u0000\u0000\u00ac\u00a4\u0001\u0000"+ + "\u0000\u0000\u00ac\u00a5\u0001\u0000\u0000\u0000\u00ac\u00a6\u0001\u0000"+ + "\u0000\u0000\u00ac\u00a8\u0001\u0000\u0000\u0000\u00ac\u00aa\u0001\u0000"+ + "\u0000\u0000\u00ad\u0007\u0001\u0000\u0000\u0000\u00ae\u00af\u0005\u0010"+ + "\u0000\u0000\u00af\u00b0\u0003\n\u0005\u0000\u00b0\t\u0001\u0000\u0000"+ + "\u0000\u00b1\u00b2\u0006\u0005\uffff\uffff\u0000\u00b2\u00b3\u00051\u0000"+ + "\u0000\u00b3\u00cf\u0003\n\u0005\b\u00b4\u00cf\u0003\u0010\b\u0000\u00b5"+ + "\u00cf\u0003\f\u0006\u0000\u00b6\u00b8\u0003\u0010\b\u0000\u00b7\u00b9"+ + "\u00051\u0000\u0000\u00b8\u00b7\u0001\u0000\u0000\u0000\u00b8\u00b9\u0001"+ + "\u0000\u0000\u0000\u00b9\u00ba\u0001\u0000\u0000\u0000\u00ba\u00bb\u0005"+ + ",\u0000\u0000\u00bb\u00bc\u00050\u0000\u0000\u00bc\u00c1\u0003\u0010\b"+ + "\u0000\u00bd\u00be\u0005\'\u0000\u0000\u00be\u00c0\u0003\u0010\b\u0000"+ + "\u00bf\u00bd\u0001\u0000\u0000\u0000\u00c0\u00c3\u0001\u0000\u0000\u0000"+ + "\u00c1\u00bf\u0001\u0000\u0000\u0000\u00c1\u00c2\u0001\u0000\u0000\u0000"+ + "\u00c2\u00c4\u0001\u0000\u0000\u0000\u00c3\u00c1\u0001\u0000\u0000\u0000"+ + "\u00c4\u00c5\u00057\u0000\u0000\u00c5\u00cf\u0001\u0000\u0000\u0000\u00c6"+ + "\u00c7\u0003\u0010\b\u0000\u00c7\u00c9\u0005-\u0000\u0000\u00c8\u00ca"+ + "\u00051\u0000\u0000\u00c9\u00c8\u0001\u0000\u0000\u0000\u00c9\u00ca\u0001"+ + "\u0000\u0000\u0000\u00ca\u00cb\u0001\u0000\u0000\u0000\u00cb\u00cc\u0005"+ + "2\u0000\u0000\u00cc\u00cf\u0001\u0000\u0000\u0000\u00cd\u00cf\u0003\u000e"+ + "\u0007\u0000\u00ce\u00b1\u0001\u0000\u0000\u0000\u00ce\u00b4\u0001\u0000"+ + "\u0000\u0000\u00ce\u00b5\u0001\u0000\u0000\u0000\u00ce\u00b6\u0001\u0000"+ + "\u0000\u0000\u00ce\u00c6\u0001\u0000\u0000\u0000\u00ce\u00cd\u0001\u0000"+ + "\u0000\u0000\u00cf\u00d8\u0001\u0000\u0000\u0000\u00d0\u00d1\n\u0005\u0000"+ + "\u0000\u00d1\u00d2\u0005\"\u0000\u0000\u00d2\u00d7\u0003\n\u0005\u0006"+ + "\u00d3\u00d4\n\u0004\u0000\u0000\u00d4\u00d5\u00054\u0000\u0000\u00d5"+ + "\u00d7\u0003\n\u0005\u0005\u00d6\u00d0\u0001\u0000\u0000\u0000\u00d6\u00d3"+ + "\u0001\u0000\u0000\u0000\u00d7\u00da\u0001\u0000\u0000\u0000\u00d8\u00d6"+ + "\u0001\u0000\u0000\u0000\u00d8\u00d9\u0001\u0000\u0000\u0000\u00d9\u000b"+ + "\u0001\u0000\u0000\u0000\u00da\u00d8\u0001\u0000\u0000\u0000\u00db\u00dd"+ + "\u0003\u0010\b\u0000\u00dc\u00de\u00051\u0000\u0000\u00dd\u00dc\u0001"+ + "\u0000\u0000\u0000\u00dd\u00de\u0001\u0000\u0000\u0000\u00de\u00df\u0001"+ + "\u0000\u0000\u0000\u00df\u00e0\u0005/\u0000\u0000\u00e0\u00e1\u0003j5"+ + "\u0000\u00e1\u00ea\u0001\u0000\u0000\u0000\u00e2\u00e4\u0003\u0010\b\u0000"+ + "\u00e3\u00e5\u00051\u0000\u0000\u00e4\u00e3\u0001\u0000\u0000\u0000\u00e4"+ + "\u00e5\u0001\u0000\u0000\u0000\u00e5\u00e6\u0001\u0000\u0000\u0000\u00e6"+ + "\u00e7\u00056\u0000\u0000\u00e7\u00e8\u0003j5\u0000\u00e8\u00ea\u0001"+ + "\u0000\u0000\u0000\u00e9\u00db\u0001\u0000\u0000\u0000\u00e9\u00e2\u0001"+ + "\u0000\u0000\u0000\u00ea\r\u0001\u0000\u0000\u0000\u00eb\u00ec\u0003:"+ + "\u001d\u0000\u00ec\u00ed\u0005&\u0000\u0000\u00ed\u00ee\u0003D\"\u0000"+ + "\u00ee\u000f\u0001\u0000\u0000\u0000\u00ef\u00f5\u0003\u0012\t\u0000\u00f0"+ + "\u00f1\u0003\u0012\t\u0000\u00f1\u00f2\u0003l6\u0000\u00f2\u00f3\u0003"+ + "\u0012\t\u0000\u00f3\u00f5\u0001\u0000\u0000\u0000\u00f4\u00ef\u0001\u0000"+ + "\u0000\u0000\u00f4\u00f0\u0001\u0000\u0000\u0000\u00f5\u0011\u0001\u0000"+ + "\u0000\u0000\u00f6\u00f7\u0006\t\uffff\uffff\u0000\u00f7\u00fb\u0003\u0014"+ + 
"\n\u0000\u00f8\u00f9\u0007\u0000\u0000\u0000\u00f9\u00fb\u0003\u0012\t"+ + "\u0003\u00fa\u00f6\u0001\u0000\u0000\u0000\u00fa\u00f8\u0001\u0000\u0000"+ + "\u0000\u00fb\u0104\u0001\u0000\u0000\u0000\u00fc\u00fd\n\u0002\u0000\u0000"+ + "\u00fd\u00fe\u0007\u0001\u0000\u0000\u00fe\u0103\u0003\u0012\t\u0003\u00ff"+ + "\u0100\n\u0001\u0000\u0000\u0100\u0101\u0007\u0000\u0000\u0000\u0101\u0103"+ + "\u0003\u0012\t\u0002\u0102\u00fc\u0001\u0000\u0000\u0000\u0102\u00ff\u0001"+ + "\u0000\u0000\u0000\u0103\u0106\u0001\u0000\u0000\u0000\u0104\u0102\u0001"+ + "\u0000\u0000\u0000\u0104\u0105\u0001\u0000\u0000\u0000\u0105\u0013\u0001"+ + "\u0000\u0000\u0000\u0106\u0104\u0001\u0000\u0000\u0000\u0107\u0108\u0006"+ + "\n\uffff\uffff\u0000\u0108\u0110\u0003D\"\u0000\u0109\u0110\u0003:\u001d"+ + "\u0000\u010a\u0110\u0003\u0016\u000b\u0000\u010b\u010c\u00050\u0000\u0000"+ + "\u010c\u010d\u0003\n\u0005\u0000\u010d\u010e\u00057\u0000\u0000\u010e"+ + "\u0110\u0001\u0000\u0000\u0000\u010f\u0107\u0001\u0000\u0000\u0000\u010f"+ + "\u0109\u0001\u0000\u0000\u0000\u010f\u010a\u0001\u0000\u0000\u0000\u010f"+ + "\u010b\u0001\u0000\u0000\u0000\u0110\u0116\u0001\u0000\u0000\u0000\u0111"+ + "\u0112\n\u0001\u0000\u0000\u0112\u0113\u0005%\u0000\u0000\u0113\u0115"+ + "\u0003\u001a\r\u0000\u0114\u0111\u0001\u0000\u0000\u0000\u0115\u0118\u0001"+ + "\u0000\u0000\u0000\u0116\u0114\u0001\u0000\u0000\u0000\u0116\u0117\u0001"+ + "\u0000\u0000\u0000\u0117\u0015\u0001\u0000\u0000\u0000\u0118\u0116\u0001"+ + "\u0000\u0000\u0000\u0119\u011a\u0003\u0018\f\u0000\u011a\u0124\u00050"+ + "\u0000\u0000\u011b\u0125\u0005B\u0000\u0000\u011c\u0121\u0003\n\u0005"+ + "\u0000\u011d\u011e\u0005\'\u0000\u0000\u011e\u0120\u0003\n\u0005\u0000"+ + "\u011f\u011d\u0001\u0000\u0000\u0000\u0120\u0123\u0001\u0000\u0000\u0000"+ + "\u0121\u011f\u0001\u0000\u0000\u0000\u0121\u0122\u0001\u0000\u0000\u0000"+ + "\u0122\u0125\u0001\u0000\u0000\u0000\u0123\u0121\u0001\u0000\u0000\u0000"+ + "\u0124\u011b\u0001\u0000\u0000\u0000\u0124\u011c\u0001\u0000\u0000\u0000"+ + "\u0124\u0125\u0001\u0000\u0000\u0000\u0125\u0126\u0001\u0000\u0000\u0000"+ + "\u0126\u0127\u00057\u0000\u0000\u0127\u0017\u0001\u0000\u0000\u0000\u0128"+ + "\u0129\u0003H$\u0000\u0129\u0019\u0001\u0000\u0000\u0000\u012a\u012b\u0003"+ + "@ \u0000\u012b\u001b\u0001\u0000\u0000\u0000\u012c\u012d\u0005\f\u0000"+ + "\u0000\u012d\u012e\u0003\u001e\u000f\u0000\u012e\u001d\u0001\u0000\u0000"+ + "\u0000\u012f\u0134\u0003 \u0010\u0000\u0130\u0131\u0005\'\u0000\u0000"+ + "\u0131\u0133\u0003 \u0010\u0000\u0132\u0130\u0001\u0000\u0000\u0000\u0133"+ + "\u0136\u0001\u0000\u0000\u0000\u0134\u0132\u0001\u0000\u0000\u0000\u0134"+ + "\u0135\u0001\u0000\u0000\u0000\u0135\u001f\u0001\u0000\u0000\u0000\u0136"+ + "\u0134\u0001\u0000\u0000\u0000\u0137\u0138\u0003:\u001d\u0000\u0138\u0139"+ + "\u0005$\u0000\u0000\u0139\u013b\u0001\u0000\u0000\u0000\u013a\u0137\u0001"+ + "\u0000\u0000\u0000\u013a\u013b\u0001\u0000\u0000\u0000\u013b\u013c\u0001"+ + "\u0000\u0000\u0000\u013c\u013d\u0003\n\u0005\u0000\u013d!\u0001\u0000"+ + "\u0000\u0000\u013e\u013f\u0005\u0006\u0000\u0000\u013f\u0144\u0003$\u0012"+ + "\u0000\u0140\u0141\u0005\'\u0000\u0000\u0141\u0143\u0003$\u0012\u0000"+ + "\u0142\u0140\u0001\u0000\u0000\u0000\u0143\u0146\u0001\u0000\u0000\u0000"+ + "\u0144\u0142\u0001\u0000\u0000\u0000\u0144\u0145\u0001\u0000\u0000\u0000"+ + "\u0145\u0148\u0001\u0000\u0000\u0000\u0146\u0144\u0001\u0000\u0000\u0000"+ + "\u0147\u0149\u0003*\u0015\u0000\u0148\u0147\u0001\u0000\u0000\u0000\u0148"+ + 
"\u0149\u0001\u0000\u0000\u0000\u0149#\u0001\u0000\u0000\u0000\u014a\u014b"+ + "\u0003&\u0013\u0000\u014b\u014c\u0005&\u0000\u0000\u014c\u014e\u0001\u0000"+ + "\u0000\u0000\u014d\u014a\u0001\u0000\u0000\u0000\u014d\u014e\u0001\u0000"+ + "\u0000\u0000\u014e\u014f\u0001\u0000\u0000\u0000\u014f\u0150\u0003(\u0014"+ + "\u0000\u0150%\u0001\u0000\u0000\u0000\u0151\u0152\u0005Q\u0000\u0000\u0152"+ + "\'\u0001\u0000\u0000\u0000\u0153\u0154\u0007\u0002\u0000\u0000\u0154)"+ + "\u0001\u0000\u0000\u0000\u0155\u0158\u0003,\u0016\u0000\u0156\u0158\u0003"+ + ".\u0017\u0000\u0157\u0155\u0001\u0000\u0000\u0000\u0157\u0156\u0001\u0000"+ + "\u0000\u0000\u0158+\u0001\u0000\u0000\u0000\u0159\u015a\u0005P\u0000\u0000"+ + "\u015a\u015f\u0005Q\u0000\u0000\u015b\u015c\u0005\'\u0000\u0000\u015c"+ + "\u015e\u0005Q\u0000\u0000\u015d\u015b\u0001\u0000\u0000\u0000\u015e\u0161"+ + "\u0001\u0000\u0000\u0000\u015f\u015d\u0001\u0000\u0000\u0000\u015f\u0160"+ + "\u0001\u0000\u0000\u0000\u0160-\u0001\u0000\u0000\u0000\u0161\u015f\u0001"+ + "\u0000\u0000\u0000\u0162\u0163\u0005F\u0000\u0000\u0163\u0164\u0003,\u0016"+ + "\u0000\u0164\u0165\u0005G\u0000\u0000\u0165/\u0001\u0000\u0000\u0000\u0166"+ + "\u0167\u0005\u0013\u0000\u0000\u0167\u016c\u0003$\u0012\u0000\u0168\u0169"+ + "\u0005\'\u0000\u0000\u0169\u016b\u0003$\u0012\u0000\u016a\u0168\u0001"+ + "\u0000\u0000\u0000\u016b\u016e\u0001\u0000\u0000\u0000\u016c\u016a\u0001"+ + "\u0000\u0000\u0000\u016c\u016d\u0001\u0000\u0000\u0000\u016d\u0170\u0001"+ + "\u0000\u0000\u0000\u016e\u016c\u0001\u0000\u0000\u0000\u016f\u0171\u0003"+ + "6\u001b\u0000\u0170\u016f\u0001\u0000\u0000\u0000\u0170\u0171\u0001\u0000"+ + "\u0000\u0000\u0171\u0174\u0001\u0000\u0000\u0000\u0172\u0173\u0005!\u0000"+ + "\u0000\u0173\u0175\u0003\u001e\u000f\u0000\u0174\u0172\u0001\u0000\u0000"+ + "\u0000\u0174\u0175\u0001\u0000\u0000\u0000\u01751\u0001\u0000\u0000\u0000"+ + "\u0176\u0177\u0005\u0004\u0000\u0000\u0177\u0178\u0003\u001e\u000f\u0000"+ + "\u01783\u0001\u0000\u0000\u0000\u0179\u017b\u0005\u000f\u0000\u0000\u017a"+ + "\u017c\u00036\u001b\u0000\u017b\u017a\u0001\u0000\u0000\u0000\u017b\u017c"+ + "\u0001\u0000\u0000\u0000\u017c\u017f\u0001\u0000\u0000\u0000\u017d\u017e"+ + "\u0005!\u0000\u0000\u017e\u0180\u0003\u001e\u000f\u0000\u017f\u017d\u0001"+ + "\u0000\u0000\u0000\u017f\u0180\u0001\u0000\u0000\u0000\u01805\u0001\u0000"+ + "\u0000\u0000\u0181\u0186\u00038\u001c\u0000\u0182\u0183\u0005\'\u0000"+ + "\u0000\u0183\u0185\u00038\u001c\u0000\u0184\u0182\u0001\u0000\u0000\u0000"+ + "\u0185\u0188\u0001\u0000\u0000\u0000\u0186\u0184\u0001\u0000\u0000\u0000"+ + "\u0186\u0187\u0001\u0000\u0000\u0000\u01877\u0001\u0000\u0000\u0000\u0188"+ + "\u0186\u0001\u0000\u0000\u0000\u0189\u018c\u0003 \u0010\u0000\u018a\u018b"+ + "\u0005\u0010\u0000\u0000\u018b\u018d\u0003\n\u0005\u0000\u018c\u018a\u0001"+ + "\u0000\u0000\u0000\u018c\u018d\u0001\u0000\u0000\u0000\u018d9\u0001\u0000"+ + "\u0000\u0000\u018e\u0193\u0003H$\u0000\u018f\u0190\u0005)\u0000\u0000"+ + "\u0190\u0192\u0003H$\u0000\u0191\u018f\u0001\u0000\u0000\u0000\u0192\u0195"+ + "\u0001\u0000\u0000\u0000\u0193\u0191\u0001\u0000\u0000\u0000\u0193\u0194"+ + "\u0001\u0000\u0000\u0000\u0194;\u0001\u0000\u0000\u0000\u0195\u0193\u0001"+ + "\u0000\u0000\u0000\u0196\u019b\u0003B!\u0000\u0197\u0198\u0005)\u0000"+ + "\u0000\u0198\u019a\u0003B!\u0000\u0199\u0197\u0001\u0000\u0000\u0000\u019a"+ + "\u019d\u0001\u0000\u0000\u0000\u019b\u0199\u0001\u0000\u0000\u0000\u019b"+ + "\u019c\u0001\u0000\u0000\u0000\u019c=\u0001\u0000\u0000\u0000\u019d\u019b"+ + 
"\u0001\u0000\u0000\u0000\u019e\u01a3\u0003<\u001e\u0000\u019f\u01a0\u0005"+ + "\'\u0000\u0000\u01a0\u01a2\u0003<\u001e\u0000\u01a1\u019f\u0001\u0000"+ + "\u0000\u0000\u01a2\u01a5\u0001\u0000\u0000\u0000\u01a3\u01a1\u0001\u0000"+ + "\u0000\u0000\u01a3\u01a4\u0001\u0000\u0000\u0000\u01a4?\u0001\u0000\u0000"+ + "\u0000\u01a5\u01a3\u0001\u0000\u0000\u0000\u01a6\u01a7\u0007\u0003\u0000"+ + "\u0000\u01a7A\u0001\u0000\u0000\u0000\u01a8\u01ac\u0005U\u0000\u0000\u01a9"+ + "\u01aa\u0004!\n\u0000\u01aa\u01ac\u0003F#\u0000\u01ab\u01a8\u0001\u0000"+ + "\u0000\u0000\u01ab\u01a9\u0001\u0000\u0000\u0000\u01acC\u0001\u0000\u0000"+ + "\u0000\u01ad\u01d8\u00052\u0000\u0000\u01ae\u01af\u0003h4\u0000\u01af"+ + "\u01b0\u0005H\u0000\u0000\u01b0\u01d8\u0001\u0000\u0000\u0000\u01b1\u01d8"+ + "\u0003f3\u0000\u01b2\u01d8\u0003h4\u0000\u01b3\u01d8\u0003b1\u0000\u01b4"+ + "\u01d8\u0003F#\u0000\u01b5\u01d8\u0003j5\u0000\u01b6\u01b7\u0005F\u0000"+ + "\u0000\u01b7\u01bc\u0003d2\u0000\u01b8\u01b9\u0005\'\u0000\u0000\u01b9"+ + "\u01bb\u0003d2\u0000\u01ba\u01b8\u0001\u0000\u0000\u0000\u01bb\u01be\u0001"+ + "\u0000\u0000\u0000\u01bc\u01ba\u0001\u0000\u0000\u0000\u01bc\u01bd\u0001"+ + "\u0000\u0000\u0000\u01bd\u01bf\u0001\u0000\u0000\u0000\u01be\u01bc\u0001"+ + "\u0000\u0000\u0000\u01bf\u01c0\u0005G\u0000\u0000\u01c0\u01d8\u0001\u0000"+ + "\u0000\u0000\u01c1\u01c2\u0005F\u0000\u0000\u01c2\u01c7\u0003b1\u0000"+ + "\u01c3\u01c4\u0005\'\u0000\u0000\u01c4\u01c6\u0003b1\u0000\u01c5\u01c3"+ + "\u0001\u0000\u0000\u0000\u01c6\u01c9\u0001\u0000\u0000\u0000\u01c7\u01c5"+ + "\u0001\u0000\u0000\u0000\u01c7\u01c8\u0001\u0000\u0000\u0000\u01c8\u01ca"+ + "\u0001\u0000\u0000\u0000\u01c9\u01c7\u0001\u0000\u0000\u0000\u01ca\u01cb"+ + "\u0005G\u0000\u0000\u01cb\u01d8\u0001\u0000\u0000\u0000\u01cc\u01cd\u0005"+ + "F\u0000\u0000\u01cd\u01d2\u0003j5\u0000\u01ce\u01cf\u0005\'\u0000\u0000"+ + "\u01cf\u01d1\u0003j5\u0000\u01d0\u01ce\u0001\u0000\u0000\u0000\u01d1\u01d4"+ + "\u0001\u0000\u0000\u0000\u01d2\u01d0\u0001\u0000\u0000\u0000\u01d2\u01d3"+ + "\u0001\u0000\u0000\u0000\u01d3\u01d5\u0001\u0000\u0000\u0000\u01d4\u01d2"+ + "\u0001\u0000\u0000\u0000\u01d5\u01d6\u0005G\u0000\u0000\u01d6\u01d8\u0001"+ + "\u0000\u0000\u0000\u01d7\u01ad\u0001\u0000\u0000\u0000\u01d7\u01ae\u0001"+ + "\u0000\u0000\u0000\u01d7\u01b1\u0001\u0000\u0000\u0000\u01d7\u01b2\u0001"+ + "\u0000\u0000\u0000\u01d7\u01b3\u0001\u0000\u0000\u0000\u01d7\u01b4\u0001"+ + "\u0000\u0000\u0000\u01d7\u01b5\u0001\u0000\u0000\u0000\u01d7\u01b6\u0001"+ + "\u0000\u0000\u0000\u01d7\u01c1\u0001\u0000\u0000\u0000\u01d7\u01cc\u0001"+ + "\u0000\u0000\u0000\u01d8E\u0001\u0000\u0000\u0000\u01d9\u01dc\u00055\u0000"+ + "\u0000\u01da\u01dc\u0005E\u0000\u0000\u01db\u01d9\u0001\u0000\u0000\u0000"+ + "\u01db\u01da\u0001\u0000\u0000\u0000\u01dcG\u0001\u0000\u0000\u0000\u01dd"+ + "\u01e1\u0003@ \u0000\u01de\u01df\u0004$\u000b\u0000\u01df\u01e1\u0003"+ + "F#\u0000\u01e0\u01dd\u0001\u0000\u0000\u0000\u01e0\u01de\u0001\u0000\u0000"+ + "\u0000\u01e1I\u0001\u0000\u0000\u0000\u01e2\u01e3\u0005\t\u0000\u0000"+ + "\u01e3\u01e4\u0005\u001f\u0000\u0000\u01e4K\u0001\u0000\u0000\u0000\u01e5"+ + "\u01e6\u0005\u000e\u0000\u0000\u01e6\u01eb\u0003N\'\u0000\u01e7\u01e8"+ + "\u0005\'\u0000\u0000\u01e8\u01ea\u0003N\'\u0000\u01e9\u01e7\u0001\u0000"+ + "\u0000\u0000\u01ea\u01ed\u0001\u0000\u0000\u0000\u01eb\u01e9\u0001\u0000"+ + "\u0000\u0000\u01eb\u01ec\u0001\u0000\u0000\u0000\u01ecM\u0001\u0000\u0000"+ + "\u0000\u01ed\u01eb\u0001\u0000\u0000\u0000\u01ee\u01f0\u0003\n\u0005\u0000"+ + 
"\u01ef\u01f1\u0007\u0004\u0000\u0000\u01f0\u01ef\u0001\u0000\u0000\u0000"+ + "\u01f0\u01f1\u0001\u0000\u0000\u0000\u01f1\u01f4\u0001\u0000\u0000\u0000"+ + "\u01f2\u01f3\u00053\u0000\u0000\u01f3\u01f5\u0007\u0005\u0000\u0000\u01f4"+ + "\u01f2\u0001\u0000\u0000\u0000\u01f4\u01f5\u0001\u0000\u0000\u0000\u01f5"+ + "O\u0001\u0000\u0000\u0000\u01f6\u01f7\u0005\b\u0000\u0000\u01f7\u01f8"+ + "\u0003>\u001f\u0000\u01f8Q\u0001\u0000\u0000\u0000\u01f9\u01fa\u0005\u0002"+ + "\u0000\u0000\u01fa\u01fb\u0003>\u001f\u0000\u01fbS\u0001\u0000\u0000\u0000"+ + "\u01fc\u01fd\u0005\u000b\u0000\u0000\u01fd\u0202\u0003V+\u0000\u01fe\u01ff"+ + "\u0005\'\u0000\u0000\u01ff\u0201\u0003V+\u0000\u0200\u01fe\u0001\u0000"+ + "\u0000\u0000\u0201\u0204\u0001\u0000\u0000\u0000\u0202\u0200\u0001\u0000"+ + "\u0000\u0000\u0202\u0203\u0001\u0000\u0000\u0000\u0203U\u0001\u0000\u0000"+ + "\u0000\u0204\u0202\u0001\u0000\u0000\u0000\u0205\u0206\u0003<\u001e\u0000"+ + "\u0206\u0207\u0005Y\u0000\u0000\u0207\u0208\u0003<\u001e\u0000\u0208W"+ + "\u0001\u0000\u0000\u0000\u0209\u020a\u0005\u0001\u0000\u0000\u020a\u020b"+ + "\u0003\u0014\n\u0000\u020b\u020d\u0003j5\u0000\u020c\u020e\u0003^/\u0000"+ + "\u020d\u020c\u0001\u0000\u0000\u0000\u020d\u020e\u0001\u0000\u0000\u0000"+ + "\u020eY\u0001\u0000\u0000\u0000\u020f\u0210\u0005\u0007\u0000\u0000\u0210"+ + "\u0211\u0003\u0014\n\u0000\u0211\u0212\u0003j5\u0000\u0212[\u0001\u0000"+ + "\u0000\u0000\u0213\u0214\u0005\n\u0000\u0000\u0214\u0215\u0003:\u001d"+ + "\u0000\u0215]\u0001\u0000\u0000\u0000\u0216\u021b\u0003`0\u0000\u0217"+ + "\u0218\u0005\'\u0000\u0000\u0218\u021a\u0003`0\u0000\u0219\u0217\u0001"+ + "\u0000\u0000\u0000\u021a\u021d\u0001\u0000\u0000\u0000\u021b\u0219\u0001"+ + "\u0000\u0000\u0000\u021b\u021c\u0001\u0000\u0000\u0000\u021c_\u0001\u0000"+ + "\u0000\u0000\u021d\u021b\u0001\u0000\u0000\u0000\u021e\u021f\u0003@ \u0000"+ + "\u021f\u0220\u0005$\u0000\u0000\u0220\u0221\u0003D\"\u0000\u0221a\u0001"+ + "\u0000\u0000\u0000\u0222\u0223\u0007\u0006\u0000\u0000\u0223c\u0001\u0000"+ + "\u0000\u0000\u0224\u0227\u0003f3\u0000\u0225\u0227\u0003h4\u0000\u0226"+ + "\u0224\u0001\u0000\u0000\u0000\u0226\u0225\u0001\u0000\u0000\u0000\u0227"+ + "e\u0001\u0000\u0000\u0000\u0228\u022a\u0007\u0000\u0000\u0000\u0229\u0228"+ + "\u0001\u0000\u0000\u0000\u0229\u022a\u0001\u0000\u0000\u0000\u022a\u022b"+ + "\u0001\u0000\u0000\u0000\u022b\u022c\u0005 \u0000\u0000\u022cg\u0001\u0000"+ + "\u0000\u0000\u022d\u022f\u0007\u0000\u0000\u0000\u022e\u022d\u0001\u0000"+ + "\u0000\u0000\u022e\u022f\u0001\u0000\u0000\u0000\u022f\u0230\u0001\u0000"+ + "\u0000\u0000\u0230\u0231\u0005\u001f\u0000\u0000\u0231i\u0001\u0000\u0000"+ + "\u0000\u0232\u0233\u0005\u001e\u0000\u0000\u0233k\u0001\u0000\u0000\u0000"+ + "\u0234\u0235\u0007\u0007\u0000\u0000\u0235m\u0001\u0000\u0000\u0000\u0236"+ + "\u0237\u0005\u0005\u0000\u0000\u0237\u0238\u0003p8\u0000\u0238o\u0001"+ + "\u0000\u0000\u0000\u0239\u023a\u0005F\u0000\u0000\u023a\u023b\u0003\u0002"+ + "\u0001\u0000\u023b\u023c\u0005G\u0000\u0000\u023cq\u0001\u0000\u0000\u0000"+ + "\u023d\u023e\u0005\r\u0000\u0000\u023e\u023f\u0005i\u0000\u0000\u023f"+ + "s\u0001\u0000\u0000\u0000\u0240\u0241\u0005\u0003\u0000\u0000\u0241\u0244"+ + "\u0005_\u0000\u0000\u0242\u0243\u0005]\u0000\u0000\u0243\u0245\u0003<"+ + "\u001e\u0000\u0244\u0242\u0001\u0000\u0000\u0000\u0244\u0245\u0001\u0000"+ + "\u0000\u0000\u0245\u024f\u0001\u0000\u0000\u0000\u0246\u0247\u0005^\u0000"+ + "\u0000\u0247\u024c\u0003v;\u0000\u0248\u0249\u0005\'\u0000\u0000\u0249"+ + 
"\u024b\u0003v;\u0000\u024a\u0248\u0001\u0000\u0000\u0000\u024b\u024e\u0001"+ + "\u0000\u0000\u0000\u024c\u024a\u0001\u0000\u0000\u0000\u024c\u024d\u0001"+ + "\u0000\u0000\u0000\u024d\u0250\u0001\u0000\u0000\u0000\u024e\u024c\u0001"+ + "\u0000\u0000\u0000\u024f\u0246\u0001\u0000\u0000\u0000\u024f\u0250\u0001"+ + "\u0000\u0000\u0000\u0250u\u0001\u0000\u0000\u0000\u0251\u0252\u0003<\u001e"+ + "\u0000\u0252\u0253\u0005$\u0000\u0000\u0253\u0255\u0001\u0000\u0000\u0000"+ + "\u0254\u0251\u0001\u0000\u0000\u0000\u0254\u0255\u0001\u0000\u0000\u0000"+ + "\u0255\u0256\u0001\u0000\u0000\u0000\u0256\u0257\u0003<\u001e\u0000\u0257"+ + "w\u0001\u0000\u0000\u0000\u0258\u0259\u0005\u0012\u0000\u0000\u0259\u025a"+ + "\u0003$\u0012\u0000\u025a\u025b\u0005]\u0000\u0000\u025b\u025c\u0003>"+ + "\u001f\u0000\u025cy\u0001\u0000\u0000\u0000\u025d\u025e\u0005\u0011\u0000"+ + "\u0000\u025e\u0261\u00036\u001b\u0000\u025f\u0260\u0005!\u0000\u0000\u0260"+ + "\u0262\u0003\u001e\u000f\u0000\u0261\u025f\u0001\u0000\u0000\u0000\u0261"+ + "\u0262\u0001\u0000\u0000\u0000\u0262{\u0001\u0000\u0000\u0000\u0263\u0265"+ + "\u0007\b\u0000\u0000\u0264\u0263\u0001\u0000\u0000\u0000\u0264\u0265\u0001"+ + "\u0000\u0000\u0000\u0265\u0266\u0001\u0000\u0000\u0000\u0266\u0267\u0005"+ + "\u0014\u0000\u0000\u0267\u0268\u0003~?\u0000\u0268\u0269\u0003\u0080@"+ + "\u0000\u0269}\u0001\u0000\u0000\u0000\u026a\u026d\u0003@ \u0000\u026b"+ + "\u026c\u0005Y\u0000\u0000\u026c\u026e\u0003@ \u0000\u026d\u026b\u0001"+ + "\u0000\u0000\u0000\u026d\u026e\u0001\u0000\u0000\u0000\u026e\u007f\u0001"+ + "\u0000\u0000\u0000\u026f\u0270\u0005]\u0000\u0000\u0270\u0275\u0003\u0082"+ + "A\u0000\u0271\u0272\u0005\'\u0000\u0000\u0272\u0274\u0003\u0082A\u0000"+ + "\u0273\u0271\u0001\u0000\u0000\u0000\u0274\u0277\u0001\u0000\u0000\u0000"+ + "\u0275\u0273\u0001\u0000\u0000\u0000\u0275\u0276\u0001\u0000\u0000\u0000"+ + "\u0276\u0081\u0001\u0000\u0000\u0000\u0277\u0275\u0001\u0000\u0000\u0000"+ + "\u0278\u0279\u0003\u0010\b\u0000\u0279\u0083\u0001\u0000\u0000\u0000="+ + "\u008f\u0098\u00ac\u00b8\u00c1\u00c9\u00ce\u00d6\u00d8\u00dd\u00e4\u00e9"+ + "\u00f4\u00fa\u0102\u0104\u010f\u0116\u0121\u0124\u0134\u013a\u0144\u0148"+ + "\u014d\u0157\u015f\u016c\u0170\u0174\u017b\u017f\u0186\u018c\u0193\u019b"+ + "\u01a3\u01ab\u01bc\u01c7\u01d2\u01d7\u01db\u01e0\u01eb\u01f0\u01f4\u0202"+ + "\u020d\u021b\u0226\u0229\u022e\u0244\u024c\u024f\u0254\u0261\u0264\u026d"+ + "\u0275"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseListener.java index 556a97657635a..6071219839bab 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseListener.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseListener.java @@ -1052,6 +1052,54 @@ public class EsqlBaseParserBaseListener implements EsqlBaseParserListener { *
<p>The default implementation does nothing.</p>
    */ @Override public void exitInlinestatsCommand(EsqlBaseParser.InlinestatsCommandContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
    + */ + @Override public void enterJoinCommand(EsqlBaseParser.JoinCommandContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
    + */ + @Override public void exitJoinCommand(EsqlBaseParser.JoinCommandContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
    + */ + @Override public void enterJoinTarget(EsqlBaseParser.JoinTargetContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
    + */ + @Override public void exitJoinTarget(EsqlBaseParser.JoinTargetContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
    + */ + @Override public void enterJoinCondition(EsqlBaseParser.JoinConditionContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
    + */ + @Override public void exitJoinCondition(EsqlBaseParser.JoinConditionContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
    + */ + @Override public void enterJoinPredicate(EsqlBaseParser.JoinPredicateContext ctx) { } + /** + * {@inheritDoc} + * + *
<p>The default implementation does nothing.</p>
    + */ + @Override public void exitJoinPredicate(EsqlBaseParser.JoinPredicateContext ctx) { } /** * {@inheritDoc} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseVisitor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseVisitor.java index 56b6999615f50..afe7146923791 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseVisitor.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserBaseVisitor.java @@ -622,4 +622,32 @@ public class EsqlBaseParserBaseVisitor extends AbstractParseTreeVisitor im * {@link #visitChildren} on {@code ctx}.
</p>
    */ @Override public T visitInlinestatsCommand(EsqlBaseParser.InlinestatsCommandContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *
<p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p>
    + */ + @Override public T visitJoinCommand(EsqlBaseParser.JoinCommandContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *
<p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p>
    + */ + @Override public T visitJoinTarget(EsqlBaseParser.JoinTargetContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *
<p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p>
    + */ + @Override public T visitJoinCondition(EsqlBaseParser.JoinConditionContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *
<p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p>
    + */ + @Override public T visitJoinPredicate(EsqlBaseParser.JoinPredicateContext ctx) { return visitChildren(ctx); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java index cf658c4a73141..0faca2541c9ad 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserListener.java @@ -941,4 +941,44 @@ public interface EsqlBaseParserListener extends ParseTreeListener { * @param ctx the parse tree */ void exitInlinestatsCommand(EsqlBaseParser.InlinestatsCommandContext ctx); + /** + * Enter a parse tree produced by {@link EsqlBaseParser#joinCommand}. + * @param ctx the parse tree + */ + void enterJoinCommand(EsqlBaseParser.JoinCommandContext ctx); + /** + * Exit a parse tree produced by {@link EsqlBaseParser#joinCommand}. + * @param ctx the parse tree + */ + void exitJoinCommand(EsqlBaseParser.JoinCommandContext ctx); + /** + * Enter a parse tree produced by {@link EsqlBaseParser#joinTarget}. + * @param ctx the parse tree + */ + void enterJoinTarget(EsqlBaseParser.JoinTargetContext ctx); + /** + * Exit a parse tree produced by {@link EsqlBaseParser#joinTarget}. + * @param ctx the parse tree + */ + void exitJoinTarget(EsqlBaseParser.JoinTargetContext ctx); + /** + * Enter a parse tree produced by {@link EsqlBaseParser#joinCondition}. + * @param ctx the parse tree + */ + void enterJoinCondition(EsqlBaseParser.JoinConditionContext ctx); + /** + * Exit a parse tree produced by {@link EsqlBaseParser#joinCondition}. + * @param ctx the parse tree + */ + void exitJoinCondition(EsqlBaseParser.JoinConditionContext ctx); + /** + * Enter a parse tree produced by {@link EsqlBaseParser#joinPredicate}. + * @param ctx the parse tree + */ + void enterJoinPredicate(EsqlBaseParser.JoinPredicateContext ctx); + /** + * Exit a parse tree produced by {@link EsqlBaseParser#joinPredicate}. + * @param ctx the parse tree + */ + void exitJoinPredicate(EsqlBaseParser.JoinPredicateContext ctx); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java index 86c1d1aafc33a..e91cd6670e971 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParserVisitor.java @@ -567,4 +567,28 @@ public interface EsqlBaseParserVisitor extends ParseTreeVisitor { * @return the visitor result */ T visitInlinestatsCommand(EsqlBaseParser.InlinestatsCommandContext ctx); + /** + * Visit a parse tree produced by {@link EsqlBaseParser#joinCommand}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitJoinCommand(EsqlBaseParser.JoinCommandContext ctx); + /** + * Visit a parse tree produced by {@link EsqlBaseParser#joinTarget}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitJoinTarget(EsqlBaseParser.JoinTargetContext ctx); + /** + * Visit a parse tree produced by {@link EsqlBaseParser#joinCondition}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitJoinCondition(EsqlBaseParser.JoinConditionContext ctx); + /** + * Visit a parse tree produced by {@link EsqlBaseParser#joinPredicate}. 
+ * @param ctx the parse tree + * @return the visitor result + */ + T visitJoinPredicate(EsqlBaseParser.JoinPredicateContext ctx); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java index f83af534eaa72..24398afa18010 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java @@ -16,6 +16,7 @@ import org.elasticsearch.dissect.DissectParser; import org.elasticsearch.index.IndexMode; import org.elasticsearch.xpack.esql.VerificationException; +import org.elasticsearch.xpack.esql.action.EsqlCapabilities; import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; @@ -53,6 +54,7 @@ import org.elasticsearch.xpack.esql.plan.logical.Rename; import org.elasticsearch.xpack.esql.plan.logical.Row; import org.elasticsearch.xpack.esql.plan.logical.UnresolvedRelation; +import org.elasticsearch.xpack.esql.plan.logical.join.LookupJoin; import org.elasticsearch.xpack.esql.plan.logical.show.ShowInfo; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.joni.exception.SyntaxException; @@ -68,6 +70,7 @@ import java.util.Set; import java.util.function.Function; +import static java.util.Collections.emptyList; import static org.elasticsearch.common.logging.HeaderWarning.addWarning; import static org.elasticsearch.xpack.esql.core.util.StringUtils.WILDCARD; import static org.elasticsearch.xpack.esql.expression.NamedExpressions.mergeOutputExpressions; @@ -274,7 +277,8 @@ public LogicalPlan visitFromCommand(EsqlBaseParser.FromCommandContext ctx) { for (var c : metadataOptionContext.UNQUOTED_SOURCE()) { String id = c.getText(); Source src = source(c); - if (MetadataAttribute.isSupported(id) == false) { + if (MetadataAttribute.isSupported(id) == false // TODO: drop check below once METADATA_SCORE is no longer snapshot-only + || (EsqlCapabilities.Cap.METADATA_SCORE.isEnabled() == false && MetadataAttribute.SCORE.equals(id))) { throw new ParsingException(src, "unsupported metadata field [" + id + "]"); } Attribute a = metadataMap.put(id, MetadataAttribute.create(src, id)); @@ -502,7 +506,7 @@ public LogicalPlan visitMetricsCommand(EsqlBaseParser.MetricsCommandContext ctx) @Override public PlanFactory visitLookupCommand(EsqlBaseParser.LookupCommandContext ctx) { if (false == Build.current().isSnapshot()) { - throw new ParsingException(source(ctx), "LOOKUP is in preview and only available in SNAPSHOT build"); + throw new ParsingException(source(ctx), "LOOKUP_🐔 is in preview and only available in SNAPSHOT build"); } var source = source(ctx); @@ -524,4 +528,42 @@ public PlanFactory visitLookupCommand(EsqlBaseParser.LookupCommandContext ctx) { return p -> new Lookup(source, p, tableName, matchFields, null /* localRelation will be resolved later*/); } + public PlanFactory visitJoinCommand(EsqlBaseParser.JoinCommandContext ctx) { + var source = source(ctx); + if (false == Build.current().isSnapshot()) { + throw new ParsingException(source, "JOIN is in preview and only available in SNAPSHOT build"); + } + + if (ctx.type != null && ctx.type.getType() != EsqlBaseParser.DEV_JOIN_LOOKUP) { + String joinType = ctx.type == null ? 
"(INNER)" : ctx.type.getText(); + throw new ParsingException(source, "only LOOKUP JOIN available, {} JOIN unsupported at the moment", joinType); + } + + var target = ctx.joinTarget(); + UnresolvedRelation right = new UnresolvedRelation( + source(target), + new TableIdentifier(source(target.index), null, visitIdentifier(target.index)), + false, + emptyList(), + IndexMode.LOOKUP, + null, + "???" + ); + + var condition = ctx.joinCondition(); + + // ON only with qualified names + var predicates = expressions(condition.joinPredicate()); + List joinFields = new ArrayList<>(predicates.size()); + for (var f : predicates) { + // verify each field is an unresolved attribute + if (f instanceof UnresolvedAttribute ua) { + joinFields.add(ua); + } else { + throw new ParsingException(f.source(), "JOIN ON clause only supports fields at the moment, found [{}]", f.sourceText()); + } + } + + return p -> new LookupJoin(source, p, right, joinFields); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ParsingException.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ParsingException.java index 0705ae7f778cd..484a655fc2988 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ParsingException.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ParsingException.java @@ -18,7 +18,7 @@ public class ParsingException extends EsqlClientException { public ParsingException(String message, Exception cause, int line, int charPositionInLine) { super(message, cause); this.line = line; - this.charPositionInLine = charPositionInLine; + this.charPositionInLine = charPositionInLine + 1; } ParsingException(String message, Object... args) { @@ -42,7 +42,7 @@ public int getLineNumber() { } public int getColumnNumber() { - return charPositionInLine + 1; + return charPositionInLine; } public String getErrorMessage() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/PlanWritables.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/PlanWritables.java new file mode 100644 index 0000000000000..b3c273cbfa1bb --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/PlanWritables.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.plan; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.xpack.esql.plan.logical.Aggregate; +import org.elasticsearch.xpack.esql.plan.logical.Dissect; +import org.elasticsearch.xpack.esql.plan.logical.Enrich; +import org.elasticsearch.xpack.esql.plan.logical.EsRelation; +import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.Grok; +import org.elasticsearch.xpack.esql.plan.logical.InlineStats; +import org.elasticsearch.xpack.esql.plan.logical.Limit; +import org.elasticsearch.xpack.esql.plan.logical.Lookup; +import org.elasticsearch.xpack.esql.plan.logical.MvExpand; +import org.elasticsearch.xpack.esql.plan.logical.OrderBy; +import org.elasticsearch.xpack.esql.plan.logical.Project; +import org.elasticsearch.xpack.esql.plan.logical.TopN; +import org.elasticsearch.xpack.esql.plan.logical.join.InlineJoin; +import org.elasticsearch.xpack.esql.plan.logical.join.Join; +import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; +import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; +import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; +import org.elasticsearch.xpack.esql.plan.physical.DissectExec; +import org.elasticsearch.xpack.esql.plan.physical.EnrichExec; +import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; +import org.elasticsearch.xpack.esql.plan.physical.EsSourceExec; +import org.elasticsearch.xpack.esql.plan.physical.EvalExec; +import org.elasticsearch.xpack.esql.plan.physical.ExchangeExec; +import org.elasticsearch.xpack.esql.plan.physical.ExchangeSinkExec; +import org.elasticsearch.xpack.esql.plan.physical.ExchangeSourceExec; +import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; +import org.elasticsearch.xpack.esql.plan.physical.FilterExec; +import org.elasticsearch.xpack.esql.plan.physical.FragmentExec; +import org.elasticsearch.xpack.esql.plan.physical.GrokExec; +import org.elasticsearch.xpack.esql.plan.physical.HashJoinExec; +import org.elasticsearch.xpack.esql.plan.physical.LimitExec; +import org.elasticsearch.xpack.esql.plan.physical.LocalSourceExec; +import org.elasticsearch.xpack.esql.plan.physical.MvExpandExec; +import org.elasticsearch.xpack.esql.plan.physical.OrderExec; +import org.elasticsearch.xpack.esql.plan.physical.ProjectExec; +import org.elasticsearch.xpack.esql.plan.physical.ShowExec; +import org.elasticsearch.xpack.esql.plan.physical.SubqueryExec; +import org.elasticsearch.xpack.esql.plan.physical.TopNExec; + +import java.util.ArrayList; +import java.util.List; + +public class PlanWritables { + + public static List getNamedWriteables() { + List entries = new ArrayList<>(); + entries.addAll(logical()); + entries.addAll(phsyical()); + return entries; + } + + public static List logical() { + return List.of( + Aggregate.ENTRY, + Dissect.ENTRY, + Enrich.ENTRY, + EsRelation.ENTRY, + EsqlProject.ENTRY, + Eval.ENTRY, + Filter.ENTRY, + Grok.ENTRY, + InlineStats.ENTRY, + InlineJoin.ENTRY, + Join.ENTRY, + LocalRelation.ENTRY, + Limit.ENTRY, + Lookup.ENTRY, + MvExpand.ENTRY, + OrderBy.ENTRY, + Project.ENTRY, + TopN.ENTRY + ); + } + + public static List phsyical() { + return List.of( + AggregateExec.ENTRY, + DissectExec.ENTRY, + EnrichExec.ENTRY, + EsQueryExec.ENTRY, + EsSourceExec.ENTRY, + EvalExec.ENTRY, + ExchangeExec.ENTRY, + ExchangeSinkExec.ENTRY, + ExchangeSourceExec.ENTRY, + FieldExtractExec.ENTRY, + FilterExec.ENTRY, + 
FragmentExec.ENTRY, + GrokExec.ENTRY, + HashJoinExec.ENTRY, + LimitExec.ENTRY, + LocalSourceExec.ENTRY, + MvExpandExec.ENTRY, + OrderExec.ENTRY, + ProjectExec.ENTRY, + ShowExec.ENTRY, + SubqueryExec.ENTRY, + TopNExec.ENTRY + ); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/QueryPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/QueryPlan.java index e34e0b8e27863..02373cc62e81f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/QueryPlan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/QueryPlan.java @@ -33,6 +33,10 @@ public QueryPlan(Source source, List children) { super(source, children); } + /** + * The ordered list of attributes (i.e. columns) this plan produces when executed. + * Must be called only on resolved plans, otherwise may throw an exception or return wrong results. + */ public abstract List output(); public AttributeSet outputSet() { @@ -59,12 +63,17 @@ public AttributeSet inputSet() { */ public List expressions() { if (lazyExpressions == null) { - lazyExpressions = new ArrayList<>(); - forEachPropertyOnly(Object.class, e -> doForEachExpression(e, lazyExpressions::add)); + lazyExpressions = computeExpressions(); } return lazyExpressions; } + protected List computeExpressions() { + List expressions = new ArrayList<>(); + forEachPropertyOnly(Object.class, e -> doForEachExpression(e, expressions::add)); + return expressions; + } + /** * The attributes required to be in the {@link QueryPlan#inputSet()} for this plan to be valid. * Excludes generated references. @@ -82,6 +91,7 @@ public AttributeSet references() { /** * This very likely needs to be overridden for {@link QueryPlan#references} to be correct when inheriting. + * This can be called on unresolved plans and therefore must not rely on calls to {@link QueryPlan#output()}. 
*/ protected AttributeSet computeReferences() { return Expressions.references(expressions()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java index e65cdda4b6069..91cd7f7a15840 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/BinaryPlan.java @@ -6,8 +6,6 @@ */ package org.elasticsearch.xpack.esql.plan.logical; -import org.elasticsearch.xpack.esql.core.expression.AttributeSet; -import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.tree.Source; import java.util.Arrays; @@ -45,11 +43,6 @@ public final BinaryPlan replaceRight(LogicalPlan newRight) { return replaceChildren(left, newRight); } - protected AttributeSet computeReferences() { - // TODO: this needs to be driven by the join config - return Expressions.references(output()); - } - public abstract BinaryPlan replaceChildren(LogicalPlan left, LogicalPlan right); @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java index eb72009638396..794a52b8e3f89 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/EsRelation.java @@ -181,7 +181,12 @@ public boolean equals(Object obj) { @Override public String nodeString() { - return nodeName() + "[" + index + "]" + NodeUtils.limitedToString(attrs); + return nodeName() + + "[" + + index + + "]" + + (indexMode != IndexMode.STANDARD ? 
"[" + indexMode.name() + "]" : "") + + NodeUtils.limitedToString(attrs); } public static IndexMode readIndexMode(StreamInput in) throws IOException { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java index 9e854450a2d34..4211f8a0d45b6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/InlineStats.java @@ -20,7 +20,7 @@ import org.elasticsearch.xpack.esql.plan.logical.join.InlineJoin; import org.elasticsearch.xpack.esql.plan.logical.join.Join; import org.elasticsearch.xpack.esql.plan.logical.join.JoinConfig; -import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes; import java.io.IOException; import java.util.ArrayList; @@ -118,7 +118,7 @@ private JoinConfig joinConfig() { } } } - return new JoinConfig(JoinType.LEFT, namedGroupings, leftFields, rightFields); + return new JoinConfig(JoinTypes.LEFT, namedGroupings, leftFields, rightFields); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LogicalPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LogicalPlan.java index e07dd9e14649e..e845c25bd3b32 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LogicalPlan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/LogicalPlan.java @@ -6,15 +6,10 @@ */ package org.elasticsearch.xpack.esql.plan.logical; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.xpack.esql.core.capabilities.Resolvable; import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.plan.QueryPlan; -import org.elasticsearch.xpack.esql.plan.logical.join.InlineJoin; -import org.elasticsearch.xpack.esql.plan.logical.join.Join; -import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import java.util.List; @@ -23,29 +18,6 @@ * For example, a logical plan in English would be: "I want to get from DEN to SFO". */ public abstract class LogicalPlan extends QueryPlan implements Resolvable { - public static List getNamedWriteables() { - return List.of( - Aggregate.ENTRY, - Dissect.ENTRY, - Enrich.ENTRY, - EsRelation.ENTRY, - EsqlProject.ENTRY, - Eval.ENTRY, - Filter.ENTRY, - Grok.ENTRY, - InlineStats.ENTRY, - InlineJoin.ENTRY, - Join.ENTRY, - LocalRelation.ENTRY, - Limit.ENTRY, - Lookup.ENTRY, - MvExpand.ENTRY, - OrderBy.ENTRY, - Project.ENTRY, - TopN.ENTRY - ); - } - /** * Order is important in the enum; any values should be added at the end. 
*/ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java index 70f8a24cfc87e..6e7f421003292 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Lookup.java @@ -19,7 +19,7 @@ import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.plan.logical.join.Join; import org.elasticsearch.xpack.esql.plan.logical.join.JoinConfig; -import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import java.io.IOException; @@ -114,7 +114,7 @@ public JoinConfig joinConfig() { } } } - return new JoinConfig(JoinType.LEFT, matchFields, leftFields, rightFields); + return new JoinConfig(JoinTypes.LEFT, matchFields, leftFields, rightFields); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnresolvedRelation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnresolvedRelation.java index 0dfbe4936e4e3..384c3f7a340ae 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnresolvedRelation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/UnresolvedRelation.java @@ -25,6 +25,10 @@ public class UnresolvedRelation extends LeafPlan implements Unresolvable { private final TableIdentifier table; private final boolean frozen; private final List metadataFields; + /* + * Expected indexMode based on the declaration - used later for verification + * at resolution time. 
+ */ private final IndexMode indexMode; private final String unresolvedMsg; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java index f9be61ed2c8d7..6af29fb23b3bb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/Join.java @@ -12,8 +12,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.AttributeSet; -import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.core.expression.Nullability; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -23,12 +21,11 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.HashSet; import java.util.List; import java.util.Objects; -import java.util.Set; import static org.elasticsearch.xpack.esql.expression.NamedExpressions.mergeOutputAttributes; +import static org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes.LEFT; public class Join extends BinaryPlan { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(LogicalPlan.class, "Join", Join::new); @@ -92,11 +89,6 @@ protected NodeInfo info() { ); } - @Override - public Join replaceChildren(LogicalPlan left, LogicalPlan right) { - return new Join(source(), left, right, config); - } - @Override public List output() { if (lazyOutput == null) { @@ -105,36 +97,36 @@ public List output() { return lazyOutput; } - /** - * Merge output fields. - * Currently only implemented for LEFT JOINs; the rightOutput shadows the leftOutput, except for any attributes that - * occur in the join's matchFields. - */ - public static List computeOutput(List leftOutput, List rightOutput, JoinConfig config) { - AttributeSet matchFieldSet = new AttributeSet(config.matchFields()); - Set matchFieldNames = new HashSet<>(Expressions.names(config.matchFields())); - return switch (config.type()) { - case LEFT -> { - // Right side becomes nullable. - List fieldsAddedFromRight = removeCollisionsWithMatchFields(rightOutput, matchFieldSet, matchFieldNames); - yield mergeOutputAttributes(fieldsAddedFromRight, leftOutput); + public List rightOutputFields() { + AttributeSet leftInputs = left().outputSet(); + + List rightOutputFields = new ArrayList<>(); + for (Attribute attr : output()) { + if (leftInputs.contains(attr) == false) { + rightOutputFields.add(attr); } - default -> throw new UnsupportedOperationException("Other JOINs than LEFT not supported"); - }; + } + + return rightOutputFields; } - private static List removeCollisionsWithMatchFields( - List attributes, - AttributeSet matchFields, - Set matchFieldNames - ) { - List result = new ArrayList<>(); - for (Attribute attr : attributes) { - if ((matchFields.contains(attr) || matchFieldNames.contains(attr.name())) == false) { - result.add(attr); - } + /** + * Combine the two lists of attributes into one. + * In case of (name) conflicts, specify which sides wins, that is overrides the other column - the left or the right. 
+ */ + public static List<Attribute> computeOutput(List<Attribute> leftOutput, List<Attribute> rightOutput, JoinConfig config) { + JoinType joinType = config.type(); + List<Attribute> output; + // TODO: make the other side nullable + if (LEFT.equals(joinType)) { + // right side becomes nullable and overrides left except for join keys, which we preserve from the left + AttributeSet rightKeys = new AttributeSet(config.rightFields()); + List<Attribute> rightOutputWithoutMatchFields = rightOutput.stream().filter(attr -> rightKeys.contains(attr) == false).toList(); + output = mergeOutputAttributes(rightOutputWithoutMatchFields, leftOutput); + } else { + throw new IllegalArgumentException(joinType.joinName() + " unsupported"); } - return result; + return output; } /** @@ -160,14 +152,6 @@ public static List<Attribute> makeReference(List<Attribute> output) { return out; } - public static List<Attribute> makeNullable(List<Attribute> output) { - List<Attribute> out = new ArrayList<>(output.size()); - for (Attribute a : output) { - out.add(a.withNullability(Nullability.TRUE)); - } - return out; - } - @Override public boolean expressionsResolved() { return config.expressionsResolved(); @@ -181,6 +165,15 @@ public boolean resolved() { return childrenResolved() && expressionsResolved(); } + public Join withConfig(JoinConfig config) { + return new Join(source(), left(), right(), config); + } + + @Override + public Join replaceChildren(LogicalPlan left, LogicalPlan right) { + return new Join(source(), left, right, config); + } + @Override public String commandName() { return "JOIN"; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/JoinConfig.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/JoinConfig.java index 68ad50f2f67a0..383606d6ccbed 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/JoinConfig.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/JoinConfig.java @@ -22,12 +22,16 @@ * @param leftFields matched with the right fields * @param rightFields matched with the left fields */ +// TODO: this class needs refactoring into a more general form (expressions) since it currently contains +// both the condition (equi-join) between the left and right field as well as the output of the join keys, +// which makes sense only for the USING clause - which is better resolved in the analyzer (based on the names), +// hence why, for now, the attributes are set inside the analyzer public record JoinConfig(JoinType type, List<Attribute> matchFields, List<Attribute> leftFields, List<Attribute> rightFields) implements Writeable { public JoinConfig(StreamInput in) throws IOException { this( - JoinType.readFrom(in), + JoinTypes.readFrom(in), in.readNamedWriteableCollectionAsList(Attribute.class), in.readNamedWriteableCollectionAsList(Attribute.class), in.readNamedWriteableCollectionAsList(Attribute.class) @@ -43,6 +47,9 @@ public void writeTo(StreamOutput out) throws IOException { } public boolean expressionsResolved() { - return Resolvables.resolved(matchFields) && Resolvables.resolved(leftFields) && Resolvables.resolved(rightFields); + return type.resolved() + && Resolvables.resolved(matchFields) + && Resolvables.resolved(leftFields) + && Resolvables.resolved(rightFields); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/JoinType.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/JoinType.java index c3095efc9e623..a309387b1f0a2 100644 ---
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/JoinType.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/JoinType.java @@ -7,46 +7,15 @@ package org.elasticsearch.xpack.esql.plan.logical.join; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import java.io.IOException; +public interface JoinType extends Writeable { -public enum JoinType implements Writeable { - INNER(0, "INNER"), - LEFT(1, "LEFT OUTER"), - RIGHT(2, "RIGHT OUTER"), - FULL(3, "FULL OUTER"), - CROSS(4, "CROSS"); - - private final byte id; - private final String name; - - JoinType(int id, String name) { - this.id = (byte) id; - this.name = name; - } - - @Override - public String toString() { - return name; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeByte(id); + default String joinName() { + return getClass().getSimpleName(); } - public static JoinType readFrom(StreamInput in) throws IOException { - byte id = in.readByte(); - return switch (id) { - case 0 -> INNER; - case 1 -> LEFT; - case 2 -> RIGHT; - case 3 -> FULL; - case 4 -> CROSS; - default -> throw new IllegalArgumentException("unsupported join [" + id + "]"); - }; + default boolean resolved() { + return true; } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/JoinTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/JoinTypes.java new file mode 100644 index 0000000000000..9d3471bc356f7 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/JoinTypes.java @@ -0,0 +1,155 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.esql.plan.logical.join; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.Maps; +import org.elasticsearch.xpack.esql.core.capabilities.Resolvables; +import org.elasticsearch.xpack.esql.core.expression.Attribute; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Utility class defining the concrete types of joins supported by ESQL. + */ +public class JoinTypes { + + private JoinTypes() {} + + public static JoinType INNER = CoreJoinType.INNER; + public static JoinType LEFT = CoreJoinType.LEFT; + public static JoinType RIGHT = CoreJoinType.RIGHT; + public static JoinType FULL = CoreJoinType.FULL; + public static JoinType CROSS = CoreJoinType.CROSS; + + private static Map<Byte, JoinType> JOIN_TYPES; + + static { + CoreJoinType[] types = CoreJoinType.values(); + JOIN_TYPES = Maps.newMapWithExpectedSize(types.length); + for (CoreJoinType type : types) { + JOIN_TYPES.put(type.id, type); + } + } + + /** + * The predefined core join types. Implemented as an enum for easy comparison and serialization.
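+ * For example (a minimal sketch of the wire round-trip): CoreJoinType.writeTo emits the byte id and + * JoinTypes.readFrom resolves it back through JOIN_TYPES, so LEFT is written as 2 and read back as LEFT.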
+ */ + private enum CoreJoinType implements JoinType { + INNER(1, "INNER"), + LEFT(2, "LEFT OUTER"), + RIGHT(3, "RIGHT OUTER"), + FULL(4, "FULL OUTER"), + CROSS(5, "CROSS"); + + private final String name; + private final byte id; + + CoreJoinType(int id, String name) { + this.id = (byte) id; + this.name = name; + } + + @Override + public String joinName() { + return name; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeByte(id); + } + } + + /** + * Join type for the USING clause - shorthand for defining an equi-join (an equality join, meaning the condition checks whether the + * columns on each side of the join are equal). + * One important difference is that the USING clause returns the join column only once, at the beginning of the result set. + */ + public static class UsingJoinType implements JoinType { + private final List<Attribute> columns; + private final JoinType coreJoin; + + public UsingJoinType(JoinType coreJoin, List<Attribute> columns) { + this.columns = columns; + this.coreJoin = coreJoin; + } + + @Override + public String joinName() { + return coreJoin.joinName() + " USING " + columns.toString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new IllegalArgumentException("USING join type should not be serialized due to being rewritten"); + } + + public JoinType coreJoin() { + return coreJoin; + } + + public List<Attribute> columns() { + return columns; + } + + @Override + public boolean resolved() { + return Resolvables.resolved(columns); + } + + @Override + public int hashCode() { + return Objects.hash(columns, coreJoin); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + UsingJoinType that = (UsingJoinType) o; + return Objects.equals(columns, that.columns) && coreJoin == that.coreJoin; + } + + @Override + public String toString() { + return joinName(); + } + } + + /** + * Private class that is not used yet; it is defined to showcase why the join type was defined as an interface instead of a simpler + * enum. + */ + private abstract static class NaturalJoinType implements JoinType { + + private final JoinType joinType; + + private NaturalJoinType(JoinType joinType) { + this.joinType = joinType; + } + + @Override + public String joinName() { + return "NATURAL " + joinType.joinName(); + } + } + + public static JoinType readFrom(StreamInput in) throws IOException { + byte id = in.readByte(); + JoinType type = JOIN_TYPES.get(id); + if (type == null) { + throw new IllegalArgumentException("unsupported join [" + id + "]"); + } + return type; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/LookupJoin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/LookupJoin.java new file mode 100644 index 0000000000000..57c8cb00baa32 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/join/LookupJoin.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.esql.plan.logical.join; + +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.Project; +import org.elasticsearch.xpack.esql.plan.logical.SurrogateLogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes.UsingJoinType; + +import java.util.List; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes.LEFT; + +/** + * Lookup join - specialized LEFT (OUTER) JOIN between the main left side and a lookup index (index_mode = lookup) on the right. + */ +public class LookupJoin extends Join implements SurrogateLogicalPlan { + + public LookupJoin(Source source, LogicalPlan left, LogicalPlan right, List joinFields) { + this(source, left, right, new UsingJoinType(LEFT, joinFields), emptyList(), emptyList(), emptyList()); + } + + public LookupJoin( + Source source, + LogicalPlan left, + LogicalPlan right, + JoinType type, + List joinFields, + List leftFields, + List rightFields + ) { + this(source, left, right, new JoinConfig(type, joinFields, leftFields, rightFields)); + } + + public LookupJoin(Source source, LogicalPlan left, LogicalPlan right, JoinConfig joinConfig) { + super(source, left, right, joinConfig); + } + + /** + * Translate the expression into a regular join with a Projection on top, to deal with serialization & co. + */ + @Override + public LogicalPlan surrogate() { + Join normalized = new Join(source(), left(), right(), config()); + // TODO: decide whether to introduce USING or just basic ON semantics - keep the ordering out for now + return new Project(source(), normalized, output()); + } + + @Override + public Join replaceChildren(LogicalPlan left, LogicalPlan right) { + return new LookupJoin(source(), left, right, config()); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create( + this, + LookupJoin::new, + left(), + right(), + config().type(), + config().matchFields(), + config().leftFields(), + config().rightFields() + ); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java index 82848fb2f1062..267b9e613abef 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java @@ -15,6 +15,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.GeoDistanceSortBuilder; +import org.elasticsearch.search.sort.ScoreSortBuilder; import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.esql.core.expression.Attribute; @@ -94,6 +95,19 @@ public SortBuilder sortBuilder() { } } + public record ScoreSort(Order.OrderDirection direction) implements Sort { + @Override + public SortBuilder sortBuilder() { + return new ScoreSortBuilder(); + } + + @Override + public FieldAttribute field() { + // TODO: refactor this: not all Sorts are backed by FieldAttributes + return null; + } + } + public EsQueryExec(Source source, EsIndex index, IndexMode indexMode, List attributes, 
QueryBuilder query) { this(source, index, indexMode, attributes, query, null, null, null); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExec.java index 5b1ee14642dbe..444c111539033 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExec.java @@ -31,7 +31,6 @@ public class FragmentExec extends LeafExec implements EstimatesRowSize { private final LogicalPlan fragment; private final QueryBuilder esFilter; - private final PhysicalPlan reducer; // datanode-level physical plan node that performs an intermediate (not partial) reduce /** * Estimate of the number of bytes that'll be loaded per position before @@ -40,25 +39,28 @@ public class FragmentExec extends LeafExec implements EstimatesRowSize { private final int estimatedRowSize; public FragmentExec(LogicalPlan fragment) { - this(fragment.source(), fragment, null, 0, null); + this(fragment.source(), fragment, null, 0); } - public FragmentExec(Source source, LogicalPlan fragment, QueryBuilder esFilter, int estimatedRowSize, PhysicalPlan reducer) { + public FragmentExec(Source source, LogicalPlan fragment, QueryBuilder esFilter, int estimatedRowSize) { super(source); this.fragment = fragment; this.esFilter = esFilter; this.estimatedRowSize = estimatedRowSize; - this.reducer = reducer; } private FragmentExec(StreamInput in) throws IOException { - this( - Source.readFrom((PlanStreamInput) in), - in.readNamedWriteable(LogicalPlan.class), - in.readOptionalNamedWriteable(QueryBuilder.class), - in.readOptionalVInt(), - in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? 
in.readOptionalNamedWriteable(PhysicalPlan.class) : null - ); + super(Source.readFrom((PlanStreamInput) in)); + this.fragment = in.readNamedWriteable(LogicalPlan.class); + this.esFilter = in.readOptionalNamedWriteable(QueryBuilder.class); + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_REMOVE_NODE_LEVEL_PLAN)) { + this.estimatedRowSize = in.readVInt(); + } else { + this.estimatedRowSize = Objects.requireNonNull(in.readOptionalVInt()); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { + in.readOptionalNamedWriteable(PhysicalPlan.class); // for old reducer + } + } } @Override @@ -66,9 +68,13 @@ public void writeTo(StreamOutput out) throws IOException { Source.EMPTY.writeTo(out); out.writeNamedWriteable(fragment()); out.writeOptionalNamedWriteable(esFilter()); - out.writeOptionalVInt(estimatedRowSize()); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { - out.writeOptionalNamedWriteable(reducer); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_REMOVE_NODE_LEVEL_PLAN)) { + out.writeVInt(estimatedRowSize); + } else { + out.writeOptionalVInt(estimatedRowSize()); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { + out.writeOptionalNamedWriteable(null);// for old reducer + } } } @@ -89,13 +95,9 @@ public Integer estimatedRowSize() { return estimatedRowSize; } - public PhysicalPlan reducer() { - return reducer; - } - @Override protected NodeInfo info() { - return NodeInfo.create(this, FragmentExec::new, fragment, esFilter, estimatedRowSize, reducer); + return NodeInfo.create(this, FragmentExec::new, fragment, esFilter, estimatedRowSize); } @Override @@ -108,24 +110,20 @@ public PhysicalPlan estimateRowSize(State state) { int estimatedRowSize = state.consumeAllFields(false); return Objects.equals(estimatedRowSize, this.estimatedRowSize) ? this - : new FragmentExec(source(), fragment, esFilter, estimatedRowSize, reducer); + : new FragmentExec(source(), fragment, esFilter, estimatedRowSize); } public FragmentExec withFragment(LogicalPlan fragment) { - return Objects.equals(fragment, this.fragment) ? this : new FragmentExec(source(), fragment, esFilter, estimatedRowSize, reducer); + return Objects.equals(fragment, this.fragment) ? this : new FragmentExec(source(), fragment, esFilter, estimatedRowSize); } public FragmentExec withFilter(QueryBuilder filter) { - return Objects.equals(filter, this.esFilter) ? this : new FragmentExec(source(), fragment, filter, estimatedRowSize, reducer); - } - - public FragmentExec withReducer(PhysicalPlan reducer) { - return Objects.equals(reducer, this.reducer) ? this : new FragmentExec(source(), fragment, esFilter, estimatedRowSize, reducer); + return Objects.equals(filter, this.esFilter) ? this : new FragmentExec(source(), fragment, filter, estimatedRowSize); } @Override public int hashCode() { - return Objects.hash(fragment, esFilter, estimatedRowSize, reducer); + return Objects.hash(fragment, esFilter, estimatedRowSize); } @Override @@ -141,8 +139,7 @@ public boolean equals(Object obj) { FragmentExec other = (FragmentExec) obj; return Objects.equals(fragment, other.fragment) && Objects.equals(esFilter, other.esFilter) - && Objects.equals(estimatedRowSize, other.estimatedRowSize) - && Objects.equals(reducer, other.reducer); + && Objects.equals(estimatedRowSize, other.estimatedRowSize); } @Override @@ -154,7 +151,6 @@ public String nodeString() { sb.append(", estimatedRowSize="); sb.append(estimatedRowSize); sb.append(", reducer=["); - sb.append(reducer == null ? 
"" : reducer.toString()); sb.append("], fragment=[<>\n"); sb.append(fragment.toString()); sb.append("<>]]"); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java index 4574c3720f8ee..5ae3702993fcb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/HashJoinExec.java @@ -91,7 +91,7 @@ public List rightFields() { public Set addedFields() { if (lazyAddedFields == null) { - lazyAddedFields = outputSet(); + lazyAddedFields = new AttributeSet(output()); lazyAddedFields.removeAll(left().output()); } return lazyAddedFields; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LookupJoinExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LookupJoinExec.java new file mode 100644 index 0000000000000..2d3caa27da4cd --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LookupJoinExec.java @@ -0,0 +1,151 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.plan.physical; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; +import org.elasticsearch.xpack.esql.core.expression.Expressions; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +public class LookupJoinExec extends BinaryExec implements EstimatesRowSize { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + PhysicalPlan.class, + "LookupJoinExec", + LookupJoinExec::new + ); + + private final List leftFields; + private final List rightFields; + /** + * These cannot be computed from the left + right outputs, because + * {@link org.elasticsearch.xpack.esql.optimizer.rules.physical.local.ReplaceSourceAttributes} will replace the {@link EsSourceExec} on + * the right hand side by a {@link EsQueryExec}, and thus lose the information of which fields we'll get from the lookup index. 
+ */ + private final List addedFields; + private List lazyOutput; + + public LookupJoinExec( + Source source, + PhysicalPlan left, + PhysicalPlan lookup, + List leftFields, + List rightFields, + List addedFields + ) { + super(source, left, lookup); + this.leftFields = leftFields; + this.rightFields = rightFields; + this.addedFields = addedFields; + } + + private LookupJoinExec(StreamInput in) throws IOException { + super(Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(PhysicalPlan.class), in.readNamedWriteable(PhysicalPlan.class)); + this.leftFields = in.readNamedWriteableCollectionAsList(Attribute.class); + this.rightFields = in.readNamedWriteableCollectionAsList(Attribute.class); + this.addedFields = in.readNamedWriteableCollectionAsList(Attribute.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeNamedWriteableCollection(leftFields); + out.writeNamedWriteableCollection(rightFields); + out.writeNamedWriteableCollection(addedFields); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + public PhysicalPlan lookup() { + return right(); + } + + public List leftFields() { + return leftFields; + } + + public List rightFields() { + return rightFields; + } + + public List addedFields() { + return addedFields; + } + + @Override + public List output() { + if (lazyOutput == null) { + lazyOutput = new ArrayList<>(left().output()); + for (Attribute attr : addedFields) { + lazyOutput.add(attr); + } + } + return lazyOutput; + } + + @Override + public PhysicalPlan estimateRowSize(State state) { + state.add(false, output()); + return this; + } + + @Override + public AttributeSet inputSet() { + // TODO: this is a hack since the right side is always materialized - instead this should + // return the _doc so the extraction can happen lazily + return left().outputSet(); + } + + @Override + protected AttributeSet computeReferences() { + // TODO: same as above - once lazy materialization of both sides lands, this needs updating + return Expressions.references(leftFields); + } + + @Override + public LookupJoinExec replaceChildren(PhysicalPlan left, PhysicalPlan right) { + return new LookupJoinExec(source(), left, right, leftFields, rightFields, addedFields); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, LookupJoinExec::new, left(), right(), leftFields, rightFields, addedFields); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (super.equals(o) == false) { + return false; + } + LookupJoinExec other = (LookupJoinExec) o; + return leftFields.equals(other.leftFields) && rightFields.equals(other.rightFields) && addedFields.equals(other.addedFields); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), leftFields, rightFields, addedFields); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/PhysicalPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/PhysicalPlan.java index ecf78908d6d3e..d2935ccb75b66 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/PhysicalPlan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/PhysicalPlan.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.plan.physical; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import 
org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.plan.QueryPlan; @@ -20,34 +19,6 @@ * PhysicalPlan = take Delta, DEN to SJC, then SJC to SFO */ public abstract class PhysicalPlan extends QueryPlan { - public static List getNamedWriteables() { - return List.of( - AggregateExec.ENTRY, - DissectExec.ENTRY, - EnrichExec.ENTRY, - EsQueryExec.ENTRY, - EsSourceExec.ENTRY, - EvalExec.ENTRY, - ExchangeExec.ENTRY, - ExchangeSinkExec.ENTRY, - ExchangeSourceExec.ENTRY, - FieldExtractExec.ENTRY, - FilterExec.ENTRY, - FragmentExec.ENTRY, - GrokExec.ENTRY, - HashJoinExec.ENTRY, - LimitExec.ENTRY, - LocalSourceExec.ENTRY, - MvExpandExec.ENTRY, - OrderExec.ENTRY, - ProjectExec.ENTRY, - RowExec.ENTRY, - ShowExec.ENTRY, - SubqueryExec.ENTRY, - TopNExec.ENTRY - ); - } - public PhysicalPlan(Source source, List children) { super(source, children); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/RowExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/RowExec.java deleted file mode 100644 index 3a104d4bc292b..0000000000000 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/RowExec.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.plan.physical; - -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.esql.core.expression.Alias; -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.Expressions; -import org.elasticsearch.xpack.esql.core.tree.NodeInfo; -import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; - -import java.io.IOException; -import java.util.List; -import java.util.Objects; - -public class RowExec extends LeafExec { - public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(PhysicalPlan.class, "RowExec", RowExec::new); - - private final List fields; - - public RowExec(Source source, List fields) { - super(source); - this.fields = fields; - } - - private RowExec(StreamInput in) throws IOException { - this(Source.readFrom((PlanStreamInput) in), in.readCollectionAsList(Alias::new)); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - Source.EMPTY.writeTo(out); - out.writeCollection(fields()); - } - - @Override - public String getWriteableName() { - return ENTRY.name; - } - - public List fields() { - return fields; - } - - @Override - public List output() { - return Expressions.asAttributes(fields); - } - - @Override - protected NodeInfo info() { - return NodeInfo.create(this, RowExec::new, fields); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - RowExec constant = (RowExec) o; - return Objects.equals(fields, constant.fields); - } - - @Override - public int hashCode() { - return Objects.hash(fields); - } -} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java index 94a9246a56f83..69e2d1c45aa3c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java @@ -18,6 +18,7 @@ import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.HashAggregationOperator.HashAggregationOperatorFactory; import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.InvalidArgumentException; import org.elasticsearch.xpack.esql.core.expression.Alias; @@ -29,6 +30,7 @@ import org.elasticsearch.xpack.esql.evaluator.EvalMapper; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; +import org.elasticsearch.xpack.esql.expression.function.grouping.Categorize; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.ExchangeSourceExec; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.LocalExecutionPlannerContext; @@ -45,6 +47,11 @@ public abstract class AbstractPhysicalOperationProviders implements PhysicalOperationProviders { private final AggregateMapper aggregateMapper = new AggregateMapper(); + private final AnalysisRegistry analysisRegistry; + + AbstractPhysicalOperationProviders(AnalysisRegistry analysisRegistry) { + this.analysisRegistry = analysisRegistry; + } @Override public final PhysicalOperation groupingPhysicalOperation( @@ -52,6 +59,7 @@ public final PhysicalOperation groupingPhysicalOperation( PhysicalOperation source, LocalExecutionPlannerContext context ) { + // The layout this operation will produce. Layout.Builder layout = new Layout.Builder(); Operator.OperatorFactory operatorFactory = null; AggregatorMode aggregatorMode = aggregateExec.getMode(); @@ -95,12 +103,17 @@ public final PhysicalOperation groupingPhysicalOperation( List aggregatorFactories = new ArrayList<>(); List groupSpecs = new ArrayList<>(aggregateExec.groupings().size()); for (Expression group : aggregateExec.groupings()) { - var groupAttribute = Expressions.attribute(group); - if (groupAttribute == null) { + Attribute groupAttribute = Expressions.attribute(group); + // In case of `... BY groupAttribute = CATEGORIZE(sourceGroupAttribute)` the actual source attribute is different. + Attribute sourceGroupAttribute = (aggregatorMode.isInputPartial() == false + && group instanceof Alias as + && as.child() instanceof Categorize categorize) ? Expressions.attribute(categorize.field()) : groupAttribute; + if (sourceGroupAttribute == null) { throw new EsqlIllegalArgumentException("Unexpected non-named expression[{}] as grouping in [{}]", group, aggregateExec); } - Layout.ChannelSet groupAttributeLayout = new Layout.ChannelSet(new HashSet<>(), groupAttribute.dataType()); - groupAttributeLayout.nameIds().add(groupAttribute.id()); + Layout.ChannelSet groupAttributeLayout = new Layout.ChannelSet(new HashSet<>(), sourceGroupAttribute.dataType()); + groupAttributeLayout.nameIds() + .add(group instanceof Alias as && as.child() instanceof Categorize ? 
groupAttribute.id() : sourceGroupAttribute.id()); /* * Check for aliasing in aggregates which occurs in two cases (due to combining project + stats): @@ -119,7 +132,7 @@ public final PhysicalOperation groupingPhysicalOperation( // check if there's any alias used in grouping - no need for the final reduction since the intermediate data // is in the output form // if the group points to an alias declared in the aggregate, use the alias child as source - else if (aggregatorMode == AggregatorMode.INITIAL || aggregatorMode == AggregatorMode.INTERMEDIATE) { + else if (aggregatorMode.isOutputPartial()) { if (groupAttribute.semanticEquals(a.toAttribute())) { groupAttribute = attr; break; @@ -129,8 +142,8 @@ else if (aggregatorMode == AggregatorMode.INITIAL || aggregatorMode == Aggregato } } layout.append(groupAttributeLayout); - Layout.ChannelAndType groupInput = source.layout.get(groupAttribute.id()); - groupSpecs.add(new GroupSpec(groupInput == null ? null : groupInput.channel(), groupAttribute)); + Layout.ChannelAndType groupInput = source.layout.get(sourceGroupAttribute.id()); + groupSpecs.add(new GroupSpec(groupInput == null ? null : groupInput.channel(), sourceGroupAttribute, group)); } if (aggregatorMode == AggregatorMode.FINAL) { @@ -164,8 +177,10 @@ else if (aggregatorMode == AggregatorMode.INITIAL || aggregatorMode == Aggregato } else { operatorFactory = new HashAggregationOperatorFactory( groupSpecs.stream().map(GroupSpec::toHashGroupSpec).toList(), + aggregatorMode, aggregatorFactories, - context.pageSize(aggregateExec.estimatedRowSize()) + context.pageSize(aggregateExec.estimatedRowSize()), + analysisRegistry ); } } @@ -178,10 +193,14 @@ else if (aggregatorMode == AggregatorMode.INITIAL || aggregatorMode == Aggregato /*** * Creates a standard layout for intermediate aggregations, typically used across exchanges. * Puts the group first, followed by each aggregation. - * - * It's similar to the code above (groupingPhysicalOperation) but ignores the factory creation. + *
<p>
+ * It's similar to the code above (groupingPhysicalOperation) but ignores the factory creation.
+ * </p>
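+ * For example (illustrative): for STATS m = MAX(salary) BY language_code, the intermediate attributes
+ * put the group first, then the aggregate's intermediate state: [language_code, m].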
    */ public static List intermediateAttributes(List aggregates, List groupings) { + // TODO: This should take CATEGORIZE into account: + // it currently works because the CATEGORIZE intermediate state is just 1 block with the same type as the function return, + // so the attribute generated here is the expected one var aggregateMapper = new AggregateMapper(); List attrs = new ArrayList<>(); @@ -304,12 +323,20 @@ private static AggregatorFunctionSupplier supplier(AggregateFunction aggregateFu throw new EsqlIllegalArgumentException("aggregate functions must extend ToAggregator"); } - private record GroupSpec(Integer channel, Attribute attribute) { + /** + * The input configuration of this group. + * + * @param channel The source channel of this group + * @param attribute The attribute, source of this group + * @param expression The expression being used to group + */ + private record GroupSpec(Integer channel, Attribute attribute, Expression expression) { BlockHash.GroupSpec toHashGroupSpec() { if (channel == null) { throw new EsqlIllegalArgumentException("planned to use ordinals but tried to use the hash instead"); } - return new BlockHash.GroupSpec(channel, elementType()); + + return new BlockHash.GroupSpec(channel, elementType(), Alias.unwrap(expression) instanceof Categorize); } ElementType elementType() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java index 3e81c2a2c1101..18bbfdf485a81 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java @@ -34,6 +34,7 @@ import org.elasticsearch.xpack.esql.expression.function.aggregate.Rate; import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialAggregateFunction; import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroid; +import org.elasticsearch.xpack.esql.expression.function.aggregate.StdDev; import org.elasticsearch.xpack.esql.expression.function.aggregate.Sum; import org.elasticsearch.xpack.esql.expression.function.aggregate.ToPartial; import org.elasticsearch.xpack.esql.expression.function.aggregate.Top; @@ -48,9 +49,6 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.elasticsearch.xpack.esql.core.type.DataType.CARTESIAN_POINT; -import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; - /** * Static class used to convert aggregate expressions to the named expressions that represent their intermediate state. *

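* For example (illustrative): StdDev, added below, registers intermediate state only for the numeric
* flavors (Int, Long, Double) - the same set used for Rate.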
    @@ -78,6 +76,7 @@ final class AggregateMapper { Min.class, Percentile.class, SpatialCentroid.class, + StdDev.class, Sum.class, Values.class, Top.class, @@ -171,7 +170,7 @@ private static Stream, Tuple>> typeAndNames(Class types = List.of("Int", "Long", "Double", "Boolean", "BytesRef"); } else if (Top.class.isAssignableFrom(clazz)) { types = List.of("Boolean", "Int", "Long", "Double", "Ip", "BytesRef"); - } else if (Rate.class.isAssignableFrom(clazz)) { + } else if (Rate.class.isAssignableFrom(clazz) || StdDev.class.isAssignableFrom(clazz)) { types = List.of("Int", "Long", "Double"); } else if (FromPartial.class.isAssignableFrom(clazz) || ToPartial.class.isAssignableFrom(clazz)) { types = List.of(""); // no type @@ -303,12 +302,13 @@ private static String dataTypeToString(DataType type, Class aggClass) { case DataType.INTEGER, DataType.COUNTER_INTEGER -> "Int"; case DataType.LONG, DataType.DATETIME, DataType.COUNTER_LONG, DataType.DATE_NANOS -> "Long"; case DataType.DOUBLE, DataType.COUNTER_DOUBLE -> "Double"; - case DataType.KEYWORD, DataType.IP, DataType.VERSION, DataType.TEXT -> "BytesRef"; + case DataType.KEYWORD, DataType.IP, DataType.VERSION, DataType.TEXT, DataType.SEMANTIC_TEXT -> "BytesRef"; case GEO_POINT -> "GeoPoint"; case CARTESIAN_POINT -> "CartesianPoint"; - case SEMANTIC_TEXT, UNSUPPORTED, NULL, UNSIGNED_LONG, SHORT, BYTE, FLOAT, HALF_FLOAT, SCALED_FLOAT, OBJECT, SOURCE, DATE_PERIOD, - TIME_DURATION, CARTESIAN_SHAPE, GEO_SHAPE, DOC_DATA_TYPE, TSID_DATA_TYPE, PARTIAL_AGG -> - throw new EsqlIllegalArgumentException("illegal agg type: " + type.typeName()); + case UNSUPPORTED, NULL, UNSIGNED_LONG, SHORT, BYTE, FLOAT, HALF_FLOAT, SCALED_FLOAT, OBJECT, SOURCE, DATE_PERIOD, TIME_DURATION, + CARTESIAN_SHAPE, GEO_SHAPE, DOC_DATA_TYPE, TSID_DATA_TYPE, PARTIAL_AGG -> throw new EsqlIllegalArgumentException( + "illegal agg type: " + type.typeName() + ); }; } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java index ab0d68b152262..7bf7d0e2d08eb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java @@ -34,6 +34,7 @@ import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -51,6 +52,7 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.MultiTypeEsField; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; @@ -97,7 +99,8 @@ public interface ShardContext extends org.elasticsearch.compute.lucene.ShardCont private final List shardContexts; - public EsPhysicalOperationProviders(List shardContexts) { + public EsPhysicalOperationProviders(List shardContexts, AnalysisRegistry 
analysisRegistry) { + super(analysisRegistry); this.shardContexts = shardContexts; } @@ -165,7 +168,10 @@ public final PhysicalOperation sourcePhysicalOperation(EsQueryExec esQueryExec, assert esQueryExec.estimatedRowSize() != null : "estimated row size not initialized"; int rowEstimatedSize = esQueryExec.estimatedRowSize(); int limit = esQueryExec.limit() != null ? (Integer) esQueryExec.limit().fold() : NO_LIMIT; - if (sorts != null && sorts.isEmpty() == false) { + boolean scoring = esQueryExec.attrs() + .stream() + .anyMatch(a -> a instanceof MetadataAttribute && a.name().equals(MetadataAttribute.SCORE)); + if ((sorts != null && sorts.isEmpty() == false)) { List> sortBuilders = new ArrayList<>(sorts.size()); for (Sort sort : sorts) { sortBuilders.add(sort.sortBuilder()); @@ -177,7 +183,8 @@ public final PhysicalOperation sourcePhysicalOperation(EsQueryExec esQueryExec, context.queryPragmas().taskConcurrency(), context.pageSize(rowEstimatedSize), limit, - sortBuilders + sortBuilders, + scoring ); } else { if (esQueryExec.indexMode() == IndexMode.TIME_SERIES) { @@ -195,7 +202,8 @@ public final PhysicalOperation sourcePhysicalOperation(EsQueryExec esQueryExec, context.queryPragmas().dataPartitioning(), context.queryPragmas().taskConcurrency(), context.pageSize(rowEstimatedSize), - limit + limit, + scoring ); } } @@ -273,7 +281,7 @@ public IndexSearcher searcher() { @Override public Optional buildSort(List> sorts) throws IOException { - return SortBuilder.buildSort(sorts, ctx); + return SortBuilder.buildSort(sorts, ctx, false); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java index 12dc77e6e7c59..1580b77931240 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java @@ -34,6 +34,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.Check; +import org.elasticsearch.xpack.esql.expression.function.fulltext.Kql; import org.elasticsearch.xpack.esql.expression.function.fulltext.Match; import org.elasticsearch.xpack.esql.expression.function.fulltext.QueryString; import org.elasticsearch.xpack.esql.expression.function.scalar.ip.CIDRMatch; @@ -47,6 +48,7 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; +import org.elasticsearch.xpack.esql.querydsl.query.KqlQuery; import org.elasticsearch.xpack.esql.querydsl.query.SpatialRelatesQuery; import org.elasticsearch.xpack.versionfield.Version; @@ -86,10 +88,10 @@ public final class EsqlExpressionTranslators { new ExpressionTranslators.IsNotNulls(), new ExpressionTranslators.Nots(), new ExpressionTranslators.Likes(), - new ExpressionTranslators.StringQueries(), new ExpressionTranslators.MultiMatches(), new MatchFunctionTranslator(), new QueryStringFunctionTranslator(), + new KqlFunctionTranslator(), new Scalars() ); @@ -536,7 +538,14 @@ protected Query asQuery(Match match, TranslatorHandler handler) { public static class QueryStringFunctionTranslator extends ExpressionTranslator { @Override 
protected Query asQuery(QueryString queryString, TranslatorHandler handler) { - return new QueryStringQuery(queryString.source(), queryString.queryAsText(), Map.of(), null); + return new QueryStringQuery(queryString.source(), queryString.queryAsText(), Map.of(), Map.of()); + } + } + + public static class KqlFunctionTranslator extends ExpressionTranslator { + @Override + protected Query asQuery(Kql kqlFunction, TranslatorHandler handler) { + return new KqlQuery(kqlFunction.source(), kqlFunction.queryAsText()); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index 0d0b8dda5fc74..a8afaa4d8119b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -31,7 +31,6 @@ import org.elasticsearch.compute.operator.Operator.OperatorFactory; import org.elasticsearch.compute.operator.OutputOperator.OutputOperatorFactory; import org.elasticsearch.compute.operator.RowInTableLookupOperator; -import org.elasticsearch.compute.operator.RowOperator.RowOperatorFactory; import org.elasticsearch.compute.operator.ShowOperator; import org.elasticsearch.compute.operator.SinkOperator; import org.elasticsearch.compute.operator.SinkOperator.SinkOperatorFactory; @@ -47,6 +46,7 @@ import org.elasticsearch.compute.operator.topn.TopNOperator.TopNOperatorFactory; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.tasks.CancellableTask; @@ -63,6 +63,8 @@ import org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.enrich.EnrichLookupOperator; import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; +import org.elasticsearch.xpack.esql.enrich.LookupFromIndexOperator; +import org.elasticsearch.xpack.esql.enrich.LookupFromIndexService; import org.elasticsearch.xpack.esql.evaluator.EvalMapper; import org.elasticsearch.xpack.esql.evaluator.command.GrokEvaluatorExtracter; import org.elasticsearch.xpack.esql.expression.Order; @@ -81,11 +83,11 @@ import org.elasticsearch.xpack.esql.plan.physical.HashJoinExec; import org.elasticsearch.xpack.esql.plan.physical.LimitExec; import org.elasticsearch.xpack.esql.plan.physical.LocalSourceExec; +import org.elasticsearch.xpack.esql.plan.physical.LookupJoinExec; import org.elasticsearch.xpack.esql.plan.physical.MvExpandExec; import org.elasticsearch.xpack.esql.plan.physical.OutputExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.plan.physical.ProjectExec; -import org.elasticsearch.xpack.esql.plan.physical.RowExec; import org.elasticsearch.xpack.esql.plan.physical.ShowExec; import org.elasticsearch.xpack.esql.plan.physical.TopNExec; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; @@ -125,6 +127,7 @@ public class LocalExecutionPlanner { private final ExchangeSourceHandler exchangeSourceHandler; private final ExchangeSinkHandler exchangeSinkHandler; private final EnrichLookupService enrichLookupService; + private final LookupFromIndexService lookupFromIndexService; private final PhysicalOperationProviders physicalOperationProviders; public LocalExecutionPlanner( @@ -138,6 +141,7 @@ public 
LocalExecutionPlanner( ExchangeSourceHandler exchangeSourceHandler, ExchangeSinkHandler exchangeSinkHandler, EnrichLookupService enrichLookupService, + LookupFromIndexService lookupFromIndexService, PhysicalOperationProviders physicalOperationProviders ) { this.sessionId = sessionId; @@ -149,6 +153,7 @@ public LocalExecutionPlanner( this.exchangeSourceHandler = exchangeSourceHandler; this.exchangeSinkHandler = exchangeSinkHandler; this.enrichLookupService = enrichLookupService; + this.lookupFromIndexService = lookupFromIndexService; this.physicalOperationProviders = physicalOperationProviders; this.configuration = configuration; } @@ -213,8 +218,6 @@ else if (node instanceof EsQueryExec esQuery) { return planEsQueryNode(esQuery, context); } else if (node instanceof EsStatsQueryExec statsQuery) { return planEsStats(statsQuery, context); - } else if (node instanceof RowExec row) { - return planRow(row, context); } else if (node instanceof LocalSourceExec localSource) { return planLocal(localSource, context); } else if (node instanceof ShowExec show) { @@ -225,8 +228,10 @@ else if (node instanceof EsQueryExec esQuery) { // lookups and joins else if (node instanceof EnrichExec enrich) { return planEnrich(enrich, context); - } else if (node instanceof HashJoinExec lookup) { - return planHashJoin(lookup, context); + } else if (node instanceof HashJoinExec join) { + return planHashJoin(join, context); + } else if (node instanceof LookupJoinExec join) { + return planLookupJoin(join, context); } // output else if (node instanceof OutputExec outputExec) { @@ -353,11 +358,10 @@ private PhysicalOperation planTopN(TopNExec topNExec, LocalExecutionPlannerConte case VERSION -> TopNEncoder.VERSION; case BOOLEAN, NULL, BYTE, SHORT, INTEGER, LONG, DOUBLE, FLOAT, HALF_FLOAT, DATETIME, DATE_NANOS, DATE_PERIOD, TIME_DURATION, OBJECT, SCALED_FLOAT, UNSIGNED_LONG, DOC_DATA_TYPE, TSID_DATA_TYPE -> TopNEncoder.DEFAULT_SORTABLE; - case GEO_POINT, CARTESIAN_POINT, GEO_SHAPE, CARTESIAN_SHAPE, COUNTER_LONG, COUNTER_INTEGER, COUNTER_DOUBLE -> + case GEO_POINT, CARTESIAN_POINT, GEO_SHAPE, CARTESIAN_SHAPE, COUNTER_LONG, COUNTER_INTEGER, COUNTER_DOUBLE, SOURCE -> TopNEncoder.DEFAULT_UNSORTABLE; // unsupported fields are encoded as BytesRef, we'll use the same encoder; all values should be null at this point case PARTIAL_AGG, UNSUPPORTED -> TopNEncoder.UNSUPPORTED; - case SOURCE -> throw new EsqlIllegalArgumentException("No TopN sorting encoder for type " + inverse.get(channel).type()); }; } List orders = topNExec.order().stream().map(order -> { @@ -489,7 +493,8 @@ private PhysicalOperation planEnrich(EnrichExec enrich, LocalExecutionPlannerCon enrichIndex, enrich.matchType(), enrich.policyMatchField(), - enrich.enrichFields() + enrich.enrichFields(), + enrich.source() ), layout ); @@ -558,15 +563,57 @@ private PhysicalOperation planHashJoin(HashJoinExec join, LocalExecutionPlannerC return source.with(new ProjectOperatorFactory(projection), layout); } - private ExpressionEvaluator.Factory toEvaluator(Expression exp, Layout layout) { - return EvalMapper.toEvaluator(exp, layout); + private PhysicalOperation planLookupJoin(LookupJoinExec join, LocalExecutionPlannerContext context) { + PhysicalOperation source = plan(join.left(), context); + Layout.Builder layoutBuilder = source.layout.builder(); + for (Attribute f : join.addedFields()) { + layoutBuilder.append(f); + } + Layout layout = layoutBuilder.build(); + + // TODO: this works when the join happens on the coordinator + /* + * But when it happens on the data node we get a 
+ * \_FieldExtractExec[language_code{f}#15, language_name{f}#16]<[]> + * \_EsQueryExec[languages_lookup], indexMode[lookup], query[][_doc{f}#18], limit[], sort[] estimatedRowSize[62] + * Which we'd prefer not to do - at least for now. We already know the fields we're loading + * and don't want any local planning. + */ + EsQueryExec localSourceExec = (EsQueryExec) join.lookup(); + if (localSourceExec.indexMode() != IndexMode.LOOKUP) { + throw new IllegalArgumentException("can't plan [" + join + "]"); + } + List matchFields = new ArrayList<>(join.leftFields().size()); + for (Attribute m : join.leftFields()) { + Layout.ChannelAndType t = source.layout.get(m.id()); + if (t == null) { + throw new IllegalArgumentException("can't plan [" + join + "][" + m + "]"); + } + matchFields.add(t); + } + if (matchFields.size() != 1) { + throw new IllegalArgumentException("can't plan [" + join + "]"); + } + + return source.with( + new LookupFromIndexOperator.Factory( + sessionId, + parentTask, + context.queryPragmas().enrichMaxWorkers(), + matchFields.getFirst().channel(), + lookupFromIndexService, + matchFields.getFirst().type(), + localSourceExec.index().name(), + join.leftFields().getFirst().name(), + join.addedFields().stream().map(f -> (NamedExpression) f).toList(), + join.source() + ), + layout + ); } - private PhysicalOperation planRow(RowExec row, LocalExecutionPlannerContext context) { - List obj = row.fields().stream().map(f -> f.child().fold()).toList(); - Layout.Builder layout = new Layout.Builder(); - layout.append(row.output()); - return PhysicalOperation.fromSource(new RowOperatorFactory(obj), layout.build()); + private ExpressionEvaluator.Factory toEvaluator(Expression exp, Layout layout) { + return EvalMapper.toEvaluator(exp, layout); } private PhysicalOperation planLocal(LocalSourceExec localSourceExec, LocalExecutionPlannerContext context) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/LocalMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/LocalMapper.java index ceffae704cff0..f95ae0e0783e5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/LocalMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/LocalMapper.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.planner.mapper; import org.elasticsearch.compute.aggregation.AggregatorMode; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; @@ -21,11 +22,12 @@ import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.logical.join.Join; import org.elasticsearch.xpack.esql.plan.logical.join.JoinConfig; -import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes; import org.elasticsearch.xpack.esql.plan.physical.EsSourceExec; import org.elasticsearch.xpack.esql.plan.physical.HashJoinExec; import org.elasticsearch.xpack.esql.plan.physical.LimitExec; import org.elasticsearch.xpack.esql.plan.physical.LocalSourceExec; +import org.elasticsearch.xpack.esql.plan.physical.LookupJoinExec; import org.elasticsearch.xpack.esql.plan.physical.OrderExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.plan.physical.TopNExec; @@ -98,26 +100,28 @@ 
private PhysicalPlan mapBinary(BinaryPlan binary) { // special handling for inlinejoin - join + subquery which has to be executed first (async) and replaced by its result if (binary instanceof Join join) { JoinConfig config = join.config(); - if (config.type() != JoinType.LEFT) { + if (config.type() != JoinTypes.LEFT) { throw new EsqlIllegalArgumentException("unsupported join type [" + config.type() + "]"); } PhysicalPlan left = map(binary.left()); PhysicalPlan right = map(binary.right()); - if (right instanceof LocalSourceExec == false) { - throw new EsqlIllegalArgumentException("right side of a join must be a local source"); + // if the right is data we can use a hash join directly + if (right instanceof LocalSourceExec localData) { + return new HashJoinExec( + join.source(), + left, + localData, + config.matchFields(), + config.leftFields(), + config.rightFields(), + join.output() + ); + } + if (right instanceof EsSourceExec source && source.indexMode() == IndexMode.LOOKUP) { + return new LookupJoinExec(join.source(), left, right, config.leftFields(), config.rightFields(), join.rightOutputFields()); } - - return new HashJoinExec( - join.source(), - left, - right, - config.matchFields(), - config.leftFields(), - config.rightFields(), - join.output() - ); } return MapperUtils.unsupported(binary); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/Mapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/Mapper.java index b717af650b7a6..8a4325ed84b2a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/Mapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/Mapper.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.planner.mapper; import org.elasticsearch.compute.aggregation.AggregatorMode; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.util.Holder; @@ -23,13 +24,14 @@ import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.logical.join.Join; import org.elasticsearch.xpack.esql.plan.logical.join.JoinConfig; -import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes; import org.elasticsearch.xpack.esql.plan.physical.EnrichExec; import org.elasticsearch.xpack.esql.plan.physical.ExchangeExec; import org.elasticsearch.xpack.esql.plan.physical.FragmentExec; import org.elasticsearch.xpack.esql.plan.physical.HashJoinExec; import org.elasticsearch.xpack.esql.plan.physical.LimitExec; import org.elasticsearch.xpack.esql.plan.physical.LocalSourceExec; +import org.elasticsearch.xpack.esql.plan.physical.LookupJoinExec; import org.elasticsearch.xpack.esql.plan.physical.OrderExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.plan.physical.TopNExec; @@ -178,7 +180,7 @@ private PhysicalPlan mapUnary(UnaryPlan unary) { private PhysicalPlan mapBinary(BinaryPlan bp) { if (bp instanceof Join join) { JoinConfig config = join.config(); - if (config.type() != JoinType.LEFT) { + if (config.type() != JoinTypes.LEFT) { throw new EsqlIllegalArgumentException("unsupported join type [" + config.type() + "]"); } @@ -190,7 +192,7 @@ private PhysicalPlan mapBinary(BinaryPlan bp) { } PhysicalPlan right = map(bp.right()); - // no fragment 
means lookup + // if the right is data we can use a hash join directly if (right instanceof LocalSourceExec localData) { return new HashJoinExec( join.source(), @@ -202,6 +204,11 @@ private PhysicalPlan mapBinary(BinaryPlan bp) { join.output() ); } + if (right instanceof FragmentExec fragment + && fragment.fragment() instanceof EsRelation relation + && relation.indexMode() == IndexMode.LOOKUP) { + return new LookupJoinExec(join.source(), left, right, config.leftFields(), config.rightFields(), join.rightOutputFields()); + } } return MapperUtils.unsupported(bp); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/MapperUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/MapperUtils.java index 213e33f3712b1..e881eabb38c43 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/MapperUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/mapper/MapperUtils.java @@ -24,7 +24,6 @@ import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.logical.MvExpand; import org.elasticsearch.xpack.esql.plan.logical.Project; -import org.elasticsearch.xpack.esql.plan.logical.Row; import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.show.ShowInfo; @@ -39,7 +38,6 @@ import org.elasticsearch.xpack.esql.plan.physical.MvExpandExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.plan.physical.ProjectExec; -import org.elasticsearch.xpack.esql.plan.physical.RowExec; import org.elasticsearch.xpack.esql.plan.physical.ShowExec; import org.elasticsearch.xpack.esql.planner.AbstractPhysicalOperationProviders; @@ -52,10 +50,6 @@ class MapperUtils { private MapperUtils() {} static PhysicalPlan mapLeaf(LeafPlan p) { - if (p instanceof Row row) { - return new RowExec(row.source(), row.fields()); - } - if (p instanceof LocalRelation local) { return new LocalSourceExec(local.source(), local.output(), local.supplier()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java index 49af4a593e6e5..8d041ffbdf0e4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeListener.java @@ -112,6 +112,7 @@ private ComputeListener( if (runningOnRemoteCluster()) { // for remote executions - this ComputeResponse is created on the remote cluster/node and will be serialized and // received by the acquireCompute method callback on the coordinating cluster + setFinalStatusAndShardCounts(clusterAlias, executionInfo); EsqlExecutionInfo.Cluster cluster = esqlExecutionInfo.getCluster(clusterAlias); result = new ComputeResponse( collectedProfiles.isEmpty() ? 
List.of() : collectedProfiles.stream().toList(), @@ -126,19 +127,33 @@ private ComputeListener( if (coordinatingClusterIsSearchedInCCS()) { // if not already marked as SKIPPED, mark the local cluster as finished once the coordinator and all // data nodes have finished processing - executionInfo.swapCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, (k, v) -> { - if (v.getStatus() != EsqlExecutionInfo.Cluster.Status.SKIPPED) { - return new EsqlExecutionInfo.Cluster.Builder(v).setStatus(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL).build(); - } else { - return v; - } - }); + setFinalStatusAndShardCounts(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, executionInfo); } } delegate.onResponse(result); }, e -> delegate.onFailure(failureCollector.getFailure()))); } + private static void setFinalStatusAndShardCounts(String clusterAlias, EsqlExecutionInfo executionInfo) { + executionInfo.swapCluster(clusterAlias, (k, v) -> { + // TODO: once PARTIAL status is supported (partial results work to come), modify this code as needed + if (v.getStatus() != EsqlExecutionInfo.Cluster.Status.SKIPPED) { + assert v.getTotalShards() != null && v.getSkippedShards() != null : "Null total or skipped shard count: " + v; + return new EsqlExecutionInfo.Cluster.Builder(v).setStatus(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL) + /* + * Total and skipped shard counts are set early in execution (after can-match). + * Until ES|QL supports shard-level partial results, we just set all non-skipped shards + * as successful and none are failed. + */ + .setSuccessfulShards(v.getTotalShards()) + .setFailedShards(0) + .build(); + } else { + return v; + } + }); + } + /** * @return true if the "local" querying/coordinator cluster is being searched in a cross-cluster search */ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index ffad379001ed0..b06dd3cdb64d3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -60,7 +60,9 @@ import org.elasticsearch.xpack.esql.action.EsqlQueryAction; import org.elasticsearch.xpack.esql.action.EsqlSearchShardsAction; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; +import org.elasticsearch.xpack.esql.enrich.LookupFromIndexService; import org.elasticsearch.xpack.esql.plan.physical.ExchangeSinkExec; import org.elasticsearch.xpack.esql.plan.physical.ExchangeSourceExec; import org.elasticsearch.xpack.esql.plan.physical.FragmentExec; @@ -81,6 +83,7 @@ import java.util.Set; import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.xpack.esql.plugin.EsqlPlugin.ESQL_WORKER_THREAD_POOL_NAME; @@ -98,13 +101,16 @@ public class ComputeService { private final DriverTaskRunner driverRunner; private final ExchangeService exchangeService; private final EnrichLookupService enrichLookupService; + private final LookupFromIndexService lookupFromIndexService; private final ClusterService clusterService; + private final AtomicLong childSessionIdGenerator = new AtomicLong(); public ComputeService( SearchService searchService, TransportService transportService, ExchangeService exchangeService, 
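The new setFinalStatusAndShardCounts helper replaces the inline swapCluster lambdas: a cluster that is not SKIPPED is marked SUCCESSFUL on completion, and because shard-level partial results are not supported yet, successful is pinned to the total and failed to zero. A minimal sketch of that copy-and-swap update (all names invented; a ConcurrentHashMap stands in for EsqlExecutionInfo):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class ExecutionInfoSketch {
        enum Status { RUNNING, SUCCESSFUL, SKIPPED }

        record Cluster(Status status, Integer total, Integer skipped, Integer successful, Integer failed) {}

        private final Map<String, Cluster> clusters = new ConcurrentHashMap<>();

        void put(String alias, Cluster c) { clusters.put(alias, c); }

        // Totals/skipped were recorded after can-match; on completion all non-skipped shards count as successful.
        void setFinalStatusAndShardCounts(String alias) {
            clusters.computeIfPresent(alias, (k, v) -> v.status() == Status.SKIPPED
                ? v
                : new Cluster(Status.SUCCESSFUL, v.total(), v.skipped(), v.total(), 0));
        }

        public static void main(String[] args) {
            ExecutionInfoSketch info = new ExecutionInfoSketch();
            info.put("remote1", new Cluster(Status.RUNNING, 7, 2, null, null));
            info.setFinalStatusAndShardCounts("remote1");
            System.out.println(info.clusters); // {remote1=Cluster[status=SUCCESSFUL, total=7, skipped=2, successful=7, failed=0]}
        }
    }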
EnrichLookupService enrichLookupService, + LookupFromIndexService lookupFromIndexService, ClusterService clusterService, ThreadPool threadPool, BigArrays bigArrays, @@ -125,6 +131,7 @@ public ComputeService( this.driverRunner = new DriverTaskRunner(transportService, this.esqlExecutor); this.exchangeService = exchangeService; this.enrichLookupService = enrichLookupService; + this.lookupFromIndexService = lookupFromIndexService; this.clusterService = clusterService; } @@ -163,7 +170,7 @@ public void execute( return; } var computeContext = new ComputeContext( - sessionId, + newChildSession(sessionId), RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, List.of(), configuration, @@ -171,6 +178,7 @@ public void execute( null ); String local = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + updateShardCountForCoordinatorOnlyQuery(execInfo); try (var computeListener = ComputeListener.create(local, transportService, rootTask, execInfo, listener.map(r -> { updateExecutionInfoAfterCoordinatorOnlyQuery(execInfo); return new Result(physicalPlan.output(), collectedPages, r.getProfiles(), execInfo); @@ -190,10 +198,6 @@ public void execute( .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planOriginalIndices(physicalPlan)); var localOriginalIndices = clusterToOriginalIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); var localConcreteIndices = clusterToConcreteIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - final var exchangeSource = new ExchangeSourceHandler( - queryPragmas.exchangeBufferSize(), - transportService.getThreadPool().executor(ThreadPool.Names.SEARCH) - ); String local = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; /* * Grab the output attributes here, so we can pass them to @@ -202,46 +206,74 @@ public void execute( */ List outputAttributes = physicalPlan.output(); try ( - Releasable ignored = exchangeSource.addEmptySink(); // this is the top level ComputeListener called once at the end (e.g., once all clusters have finished for a CCS) var computeListener = ComputeListener.create(local, transportService, rootTask, execInfo, listener.map(r -> { execInfo.markEndQuery(); // TODO: revisit this time recording model as part of INLINESTATS improvements return new Result(outputAttributes, collectedPages, r.getProfiles(), execInfo); })) ) { - // run compute on the coordinator - exchangeSource.addCompletionListener(computeListener.acquireAvoid()); - runCompute( - rootTask, - new ComputeContext(sessionId, RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, List.of(), configuration, exchangeSource, null), - coordinatorPlan, - computeListener.acquireCompute(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) + var exchangeSource = new ExchangeSourceHandler( + queryPragmas.exchangeBufferSize(), + transportService.getThreadPool().executor(ThreadPool.Names.SEARCH), + computeListener.acquireAvoid() ); - // starts computes on data nodes on the main cluster - if (localConcreteIndices != null && localConcreteIndices.indices().length > 0) { - startComputeOnDataNodes( + try (Releasable ignored = exchangeSource.addEmptySink()) { + // run compute on the coordinator + runCompute( + rootTask, + new ComputeContext( + sessionId, + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + List.of(), + configuration, + exchangeSource, + null + ), + coordinatorPlan, + computeListener.acquireCompute(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) + ); + // starts computes on data nodes on the main cluster + if (localConcreteIndices != null && localConcreteIndices.indices().length > 0) { + startComputeOnDataNodes( + 
sessionId, + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + rootTask, + configuration, + dataNodePlan, + Set.of(localConcreteIndices.indices()), + localOriginalIndices, + exchangeSource, + execInfo, + computeListener + ); + } + // starts computes on remote clusters + startComputeOnRemoteClusters( sessionId, - RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, rootTask, configuration, dataNodePlan, - Set.of(localConcreteIndices.indices()), - localOriginalIndices, exchangeSource, - execInfo, + getRemoteClusters(clusterToConcreteIndices, clusterToOriginalIndices), computeListener ); } - // starts computes on remote clusters - startComputeOnRemoteClusters( - sessionId, - rootTask, - configuration, - dataNodePlan, - exchangeSource, - getRemoteClusters(clusterToConcreteIndices, clusterToOriginalIndices), - computeListener - ); + } + } + + // For queries like: FROM logs* | LIMIT 0 (including cross-cluster LIMIT 0 queries) + private static void updateShardCountForCoordinatorOnlyQuery(EsqlExecutionInfo execInfo) { + if (execInfo.isCrossClusterSearch()) { + for (String clusterAlias : execInfo.clusterAliases()) { + execInfo.swapCluster( + clusterAlias, + (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setTotalShards(0) + .setSuccessfulShards(0) + .setSkippedShards(0) + .setFailedShards(0) + .build() + ); + } } } @@ -251,19 +283,13 @@ private static void updateExecutionInfoAfterCoordinatorOnlyQuery(EsqlExecutionIn if (execInfo.isCrossClusterSearch()) { assert execInfo.planningTookTime() != null : "Planning took time should be set on EsqlExecutionInfo but is null"; for (String clusterAlias : execInfo.clusterAliases()) { - // took time and shard counts for SKIPPED clusters were added at end of planning, so only update other cases here - if (execInfo.getCluster(clusterAlias).getStatus() != EsqlExecutionInfo.Cluster.Status.SKIPPED) { - execInfo.swapCluster( - clusterAlias, - (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setTook(execInfo.overallTook()) - .setStatus(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL) - .setTotalShards(0) - .setSuccessfulShards(0) - .setSkippedShards(0) - .setFailedShards(0) - .build() - ); - } + execInfo.swapCluster(clusterAlias, (k, v) -> { + var builder = new EsqlExecutionInfo.Cluster.Builder(v).setTook(execInfo.overallTook()); + if (v.getStatus() == EsqlExecutionInfo.Cluster.Status.RUNNING) { + builder.setStatus(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL); + } + return builder.build(); + }); } } } @@ -302,14 +328,7 @@ private void startComputeOnDataNodes( EsqlExecutionInfo executionInfo, ComputeListener computeListener ) { - var planWithReducer = configuration.pragmas().nodeLevelReduction() == false - ? dataNodePlan - : dataNodePlan.transformUp(FragmentExec.class, f -> { - PhysicalPlan reductionNode = PlannerUtils.dataNodeReductionPlan(f.fragment(), dataNodePlan); - return reductionNode == null ? 
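updateShardCountForCoordinatorOnlyQuery covers plans that never reach a data node, such as FROM logs* | LIMIT 0: every requested cluster is stamped with zeroed shard counts before the coordinator-only compute runs. A toy version of that zeroing pass (names are invented):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class CoordinatorOnlyShardCounts {
        record Counts(int total, int successful, int skipped, int failed) {}

        static Map<String, Counts> zeroOut(List<String> clusterAliases) {
            Map<String, Counts> counts = new HashMap<>();
            for (String alias : clusterAliases) {
                counts.put(alias, new Counts(0, 0, 0, 0)); // nothing searched, nothing skipped, nothing failed
            }
            return counts;
        }

        public static void main(String[] args) {
            System.out.println(zeroOut(List.of("(local)", "remote1")));
        }
    }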
f : f.withReducer(reductionNode); - }); - - QueryBuilder requestFilter = PlannerUtils.requestTimestampFilter(planWithReducer); + QueryBuilder requestFilter = PlannerUtils.requestTimestampFilter(dataNodePlan); var lookupListener = ActionListener.releaseAfter(computeListener.acquireAvoid(), exchangeSource.addEmptySink()); // SearchShards API can_match is done in lookupDataNodes lookupDataNodes(parentTask, clusterAlias, requestFilter, concreteIndices, originalIndices, ActionListener.wrap(dataNodeResult -> { @@ -318,9 +337,8 @@ private void startComputeOnDataNodes( executionInfo.swapCluster( clusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setTotalShards(dataNodeResult.totalShards()) - .setSuccessfulShards(dataNodeResult.totalShards()) + // do not set successful or failed shard count here - do it when search is done .setSkippedShards(dataNodeResult.skippedShards()) - .setFailedShards(0) .build() ); @@ -328,27 +346,28 @@ private void startComputeOnDataNodes( // the new remote exchange sink, and initialize the computation on the target node via data-node-request. for (DataNode node : dataNodeResult.dataNodes()) { var queryPragmas = configuration.pragmas(); + var childSessionId = newChildSession(sessionId); ExchangeService.openExchange( transportService, node.connection, - sessionId, + childSessionId, queryPragmas.exchangeBufferSize(), esqlExecutor, refs.acquire().delegateFailureAndWrap((l, unused) -> { - var remoteSink = exchangeService.newRemoteSink(parentTask, sessionId, transportService, node.connection); - exchangeSource.addRemoteSink(remoteSink, queryPragmas.concurrentExchangeClients()); + var remoteSink = exchangeService.newRemoteSink(parentTask, childSessionId, transportService, node.connection); + exchangeSource.addRemoteSink(remoteSink, true, queryPragmas.concurrentExchangeClients(), ActionListener.noop()); ActionListener computeResponseListener = computeListener.acquireCompute(clusterAlias); var dataNodeListener = ActionListener.runBefore(computeResponseListener, () -> l.onResponse(null)); transportService.sendChildRequest( node.connection, DATA_ACTION_NAME, new DataNodeRequest( - sessionId, + childSessionId, configuration, clusterAlias, node.shardIds, node.aliasFilters, - planWithReducer, + dataNodePlan, originalIndices.indices(), originalIndices.indicesOptions() ), @@ -376,17 +395,18 @@ private void startComputeOnRemoteClusters( var linkExchangeListeners = ActionListener.releaseAfter(computeListener.acquireAvoid(), exchangeSource.addEmptySink()); try (RefCountingListener refs = new RefCountingListener(linkExchangeListeners)) { for (RemoteCluster cluster : clusters) { + final var childSessionId = newChildSession(sessionId); ExchangeService.openExchange( transportService, cluster.connection, - sessionId, + childSessionId, queryPragmas.exchangeBufferSize(), esqlExecutor, refs.acquire().delegateFailureAndWrap((l, unused) -> { - var remoteSink = exchangeService.newRemoteSink(rootTask, sessionId, transportService, cluster.connection); - exchangeSource.addRemoteSink(remoteSink, queryPragmas.concurrentExchangeClients()); + var remoteSink = exchangeService.newRemoteSink(rootTask, childSessionId, transportService, cluster.connection); + exchangeSource.addRemoteSink(remoteSink, true, queryPragmas.concurrentExchangeClients(), ActionListener.noop()); var remotePlan = new RemoteClusterPlan(plan, cluster.concreteIndices, cluster.originalIndices); - var clusterRequest = new ClusterComputeRequest(cluster.clusterAlias, sessionId, configuration, remotePlan); + var clusterRequest = new 
ClusterComputeRequest(cluster.clusterAlias, childSessionId, configuration, remotePlan); var clusterListener = ActionListener.runBefore( computeListener.acquireCompute(cluster.clusterAlias()), () -> l.onResponse(null) @@ -431,16 +451,17 @@ void runCompute(CancellableTask task, ComputeContext context, PhysicalPlan plan, context.exchangeSource(), context.exchangeSink(), enrichLookupService, - new EsPhysicalOperationProviders(contexts) + lookupFromIndexService, + new EsPhysicalOperationProviders(contexts, searchService.getIndicesService().getAnalysis()) ); LOGGER.debug("Received physical plan:\n{}", plan); + plan = PlannerUtils.localPlan(context.searchExecutionContexts(), context.configuration, plan); // the planner will also set the driver parallelism in LocalExecutionPlanner.LocalExecutionPlan (used down below) // it's doing this in the planning of EsQueryExec (the source of the data) // see also EsPhysicalOperationProviders.sourcePhysicalOperation LocalExecutionPlanner.LocalExecutionPlan localExecutionPlan = planner.plan(plan); - if (LOGGER.isDebugEnabled()) { LOGGER.debug("Local execution plan:\n{}", localExecutionPlan.describe()); } @@ -726,9 +747,8 @@ private void runComputeOnDataNode( // run the node-level reduction var externalSink = exchangeService.getSinkHandler(externalId); task.addListener(() -> exchangeService.finishSinkHandler(externalId, new TaskCancelledException(task.getReasonCancelled()))); - var exchangeSource = new ExchangeSourceHandler(1, esqlExecutor); - exchangeSource.addCompletionListener(computeListener.acquireAvoid()); - exchangeSource.addRemoteSink(internalSink::fetchPageAsync, 1); + var exchangeSource = new ExchangeSourceHandler(1, esqlExecutor, computeListener.acquireAvoid()); + exchangeSource.addRemoteSink(internalSink::fetchPageAsync, true, 1, ActionListener.noop()); ActionListener reductionListener = computeListener.acquireCompute(); runCompute( task, @@ -771,14 +791,23 @@ public void messageReceived(DataNodeRequest request, TransportChannel channel, T listener.onFailure(new IllegalStateException("expected a fragment plan for a remote compute; got " + request.plan())); return; } - var localExchangeSource = new ExchangeSourceExec(plan.source(), plan.output(), plan.isIntermediateAgg()); - FragmentExec fragment = (FragmentExec) fragments.get(0); + Holder reducePlanHolder = new Holder<>(); + if (request.pragmas().nodeLevelReduction()) { + PhysicalPlan dataNodePlan = request.plan(); + request.plan() + .forEachUp( + FragmentExec.class, + f -> { reducePlanHolder.set(PlannerUtils.dataNodeReductionPlan(f.fragment(), dataNodePlan)); } + ); + } reducePlan = new ExchangeSinkExec( plan.source(), plan.output(), plan.isIntermediateAgg(), - fragment.reducer() != null ? fragment.reducer().replaceChildren(List.of(localExchangeSource)) : localExchangeSource + reducePlanHolder.get() != null + ? 
reducePlanHolder.get().replaceChildren(List.of(localExchangeSource)) + : localExchangeSource ); } else { listener.onFailure(new IllegalStateException("expected exchange sink for a remote compute; got " + request.plan())); @@ -865,11 +894,11 @@ void runComputeOnRemoteCluster( final String localSessionId = clusterAlias + ":" + globalSessionId; var exchangeSource = new ExchangeSourceHandler( configuration.pragmas().exchangeBufferSize(), - transportService.getThreadPool().executor(ThreadPool.Names.SEARCH) + transportService.getThreadPool().executor(ThreadPool.Names.SEARCH), + computeListener.acquireAvoid() ); try (Releasable ignored = exchangeSource.addEmptySink()) { exchangeSink.addCompletionListener(computeListener.acquireAvoid()); - exchangeSource.addCompletionListener(computeListener.acquireAvoid()); PhysicalPlan coordinatorPlan = new ExchangeSinkExec( plan.source(), plan.output(), @@ -909,4 +938,8 @@ public List searchExecutionContexts() { return searchContexts.stream().map(ctx -> ctx.getSearchExecutionContext()).toList(); } } + + private String newChildSession(String session) { + return session + "/" + childSessionIdGenerator.incrementAndGet(); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java index 266f07d22eaf5..a347a6947bf67 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.esql.plugin; import org.elasticsearch.Build; -import org.elasticsearch.Version; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; @@ -16,7 +15,6 @@ import org.elasticsearch.xpack.esql.action.EsqlCapabilities; import java.util.Collections; -import java.util.Map; import java.util.Set; /** @@ -48,34 +46,11 @@ public class EsqlFeatures implements FeatureSpecification { */ private static final NodeFeature ST_X_Y = new NodeFeature("esql.st_x_y"); - /** - * When we added the warnings for multivalued fields emitting {@code null} - * when they touched multivalued fields. Added in #102417. - */ - private static final NodeFeature MV_WARN = new NodeFeature("esql.mv_warn"); - - /** - * Support for loading {@code geo_point} and {@code cartesian_point} fields. Added in #102177. - */ - private static final NodeFeature SPATIAL_POINTS = new NodeFeature("esql.spatial_points"); - /** * Changed precision of {@code geo_point} and {@code cartesian_point} fields, by loading from source into WKB. Done in #103691. */ private static final NodeFeature SPATIAL_POINTS_FROM_SOURCE = new NodeFeature("esql.spatial_points_from_source"); - /** - * When we added the warnings when conversion functions fail. Like {@code TO_INT('foo')}. - * Added in ESQL-1183. - */ - private static final NodeFeature CONVERT_WARN = new NodeFeature("esql.convert_warn"); - - /** - * When we flipped the return type of {@code POW} to always return a double. Changed - * in #102183. - */ - private static final NodeFeature POW_DOUBLE = new NodeFeature("esql.pow_double"); - /** * Support for loading {@code geo_shape} and {@code cartesian_shape} fields. Done in #104269. 
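Two related threads run through these ComputeService hunks: the ExchangeSourceHandler now receives its completion listener at construction instead of via addCompletionListener, and every data-node or remote-cluster exchange is opened under a child session id rather than the shared coordinator id, so concurrent exchanges for one query cannot collide. The child-id scheme is small enough to reproduce directly:

    import java.util.concurrent.atomic.AtomicLong;

    // Mirrors newChildSession() and the childSessionIdGenerator field added above; the class name is invented.
    class SessionIds {
        private final AtomicLong childSessionIdGenerator = new AtomicLong();

        String newChildSession(String session) {
            return session + "/" + childSessionIdGenerator.incrementAndGet();
        }

        public static void main(String[] args) {
            SessionIds ids = new SessionIds();
            System.out.println(ids.newChildSession("query-abc")); // query-abc/1
            System.out.println(ids.newChildSession("query-abc")); // query-abc/2
        }
    }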
*/ @@ -152,12 +127,6 @@ public class EsqlFeatures implements FeatureSpecification { */ public static final NodeFeature METADATA_FIELDS = new NodeFeature("esql.metadata_fields"); - /** - * Support for loading values over enrich. This is supported by all versions of ESQL but not - * the unit test CsvTests. - */ - public static final NodeFeature ENRICH_LOAD = new NodeFeature("esql.enrich_load"); - /** * Support for timespan units abbreviations */ @@ -215,16 +184,4 @@ public Set getFeatures() { return features; } } - - @Override - public Map getHistoricalFeatures() { - return Map.ofEntries( - Map.entry(TransportEsqlStatsAction.ESQL_STATS_FEATURE, Version.V_8_11_0), - Map.entry(MV_WARN, Version.V_8_12_0), - Map.entry(SPATIAL_POINTS, Version.V_8_12_0), - Map.entry(CONVERT_WARN, Version.V_8_12_0), - Map.entry(POW_DOUBLE, Version.V_8_12_0), - Map.entry(ENRICH_LOAD, Version.V_8_12_0) - ); - } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java index e9b9f571e880e..67948fe717f2f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java @@ -21,8 +21,8 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.FeatureFlag; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockWritables; import org.elasticsearch.compute.lucene.LuceneOperator; import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; import org.elasticsearch.compute.operator.AbstractPageMappingOperator; @@ -38,6 +38,7 @@ import org.elasticsearch.compute.operator.exchange.ExchangeSourceOperator; import org.elasticsearch.compute.operator.topn.TopNOperatorStatus; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestController; @@ -45,6 +46,7 @@ import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; import org.elasticsearch.xpack.esql.EsqlInfoTransportAction; @@ -58,17 +60,10 @@ import org.elasticsearch.xpack.esql.action.RestEsqlDeleteAsyncResultAction; import org.elasticsearch.xpack.esql.action.RestEsqlGetAsyncResultAction; import org.elasticsearch.xpack.esql.action.RestEsqlQueryAction; -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.enrich.EnrichLookupOperator; import org.elasticsearch.xpack.esql.execution.PlanExecutor; -import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; -import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; -import org.elasticsearch.xpack.esql.expression.function.fulltext.FullTextFunction; -import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; -import 
org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.esql.expression.ExpressionWritables; +import org.elasticsearch.xpack.esql.plan.PlanWritables; import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; import org.elasticsearch.xpack.esql.session.IndexResolver; @@ -116,7 +111,7 @@ public Collection createComponents(PluginServices services) { BlockFactory blockFactory = new BlockFactory(circuitBreaker, bigArrays, maxPrimitiveArrayBlockSize); setupSharedSecrets(); return List.of( - new PlanExecutor(new IndexResolver(services.client()), services.telemetryProvider().getMeterRegistry()), + new PlanExecutor(new IndexResolver(services.client()), services.telemetryProvider().getMeterRegistry(), getLicenseState()), new ExchangeService(services.clusterService().getSettings(), services.threadPool(), ThreadPool.Names.SEARCH, blockFactory), blockFactory ); @@ -131,6 +126,11 @@ private void setupSharedSecrets() { } } + // to be overriden by tests + protected XPackLicenseState getLicenseState() { + return XPackPlugin.getSharedLicenseState(); + } + /** * The settings defined by the ESQL plugin. * @@ -192,18 +192,10 @@ public List getNamedWriteables() { entries.add(SingleValueQuery.ENTRY); entries.add(AsyncOperator.Status.ENTRY); entries.add(EnrichLookupOperator.Status.ENTRY); - entries.addAll(Block.getNamedWriteables()); - entries.addAll(Attribute.getNamedWriteables()); - entries.add(UnsupportedAttribute.ENTRY); // TODO combine with above once these are in the same project - entries.addAll(NamedExpression.getNamedWriteables()); - entries.add(UnsupportedAttribute.NAMED_EXPRESSION_ENTRY); // TODO combine with above once these are in the same project - entries.addAll(Expression.getNamedWriteables()); - entries.add(UnsupportedAttribute.EXPRESSION_ENTRY); // TODO combine with above once these are in the same project - entries.addAll(EsqlScalarFunction.getNamedWriteables()); - entries.addAll(AggregateFunction.getNamedWriteables()); - entries.addAll(LogicalPlan.getNamedWriteables()); - entries.addAll(PhysicalPlan.getNamedWriteables()); - entries.addAll(FullTextFunction.getNamedWriteables()); + + entries.addAll(BlockWritables.getNamedWriteables()); + entries.addAll(ExpressionWritables.getNamedWriteables()); + entries.addAll(PlanWritables.getNamedWriteables()); return entries; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java index 04e5fdc4b3bd2..76bfb95d07926 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java @@ -101,6 +101,7 @@ public TransportEsqlQueryAction( transportService, exchangeService, enrichLookupService, + lookupFromIndexService, clusterService, threadPool, bigArrays, @@ -150,6 +151,8 @@ private void doExecuteForked(Task task, EsqlQueryRequest request, ActionListener @Override public void execute(EsqlQueryRequest request, EsqlQueryTask task, ActionListener listener) { + // set EsqlExecutionInfo on async-search task so that it is accessible to GET _query/async while the query is still running + task.setExecutionInfo(createEsqlExecutionInfo(request)); ActionListener.run(listener, l -> innerExecute(task, request, l)); } @@ -169,10 +172,9 @@ private void 
innerExecute(Task task, EsqlQueryRequest request, ActionListener<EsqlQueryResponse> listener) { - EsqlExecutionInfo executionInfo = new EsqlExecutionInfo( - clusterAlias -> remoteClusterService.isSkipUnavailable(clusterAlias), - request.includeCCSMetadata() - ); + // async-query uses EsqlQueryTask, so pull the EsqlExecutionInfo out of the task + // sync query uses CancellableTask which does not have EsqlExecutionInfo, so create one + EsqlExecutionInfo executionInfo = getOrCreateExecutionInfo(task, request); PlanRunner planRunner = (plan, resultListener) -> computeService.execute( sessionId, (CancellableTask) task, @@ -193,6 +195,18 @@ private void innerExecute(Task task, EsqlQueryRequest request, ActionListener<EsqlQueryResponse> listener) { + private EsqlExecutionInfo getOrCreateExecutionInfo(Task task, EsqlQueryRequest request) { + if (task instanceof EsqlQueryTask esqlQueryTask && esqlQueryTask.executionInfo() != null) { + return esqlQueryTask.executionInfo(); + } + return createEsqlExecutionInfo(request); + } + + private EsqlExecutionInfo createEsqlExecutionInfo(EsqlQueryRequest request) { + return new EsqlExecutionInfo(clusterAlias -> remoteClusterService.isSkipUnavailable(clusterAlias), request.includeCCSMetadata()); + } + private EsqlQueryResponse toResponse(Task task, EsqlQueryRequest request, Configuration configuration, Result result) { List<ColumnInfoImpl> columns = result.schema().stream().map(c -> new ColumnInfoImpl(c.name(), c.dataType().outputType())).toList(); EsqlQueryResponse.Profile profile = configuration.profile() ? new EsqlQueryResponse.Profile(result.profiles()) : null; @@ -268,7 +282,7 @@ public EsqlQueryResponse initialResponse(EsqlQueryTask task) { asyncExecutionId, true, // is_running true, // isAsync - null + task.executionInfo() ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlStatsAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlStatsAction.java index 985dcf118ac54..4067fc5a4e065 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlStatsAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlStatsAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.features.FeatureService; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -34,8 +33,6 @@ public class TransportEsqlStatsAction extends TransportNodesAction< EsqlStatsResponse.NodeStatsResponse, Void> { - static final NodeFeature ESQL_STATS_FEATURE = new NodeFeature("esql.stats_node"); - // the plan executor holds the metrics private final FeatureService featureService; private final PlanExecutor planExecutor; @@ -63,13 +60,7 @@ public TransportEsqlStatsAction( @Override protected DiscoveryNode[] resolveRequest(EsqlStatsRequest request, ClusterState clusterState) { - if (featureService.clusterHasFeature(clusterState, ESQL_STATS_FEATURE)) { - // use the whole cluster - return super.resolveRequest(request, clusterState); - } else { - // not all nodes in the cluster have upgraded to esql - just use this node for now - return new DiscoveryNode[] { clusterService.localNode() }; - } + return super.resolveRequest(request, clusterState); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/KqlQuery.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/KqlQuery.java new file mode 100644 index 0000000000000..c388a131b9ab6 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/KqlQuery.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.esql.querydsl.query; + +import org.elasticsearch.core.Booleans; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.esql.core.querydsl.query.Query; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.kql.query.KqlQueryBuilder; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import java.util.function.BiConsumer; + +import static java.util.Map.entry; + +public class KqlQuery extends Query { + + private static final Map<String, BiConsumer<KqlQueryBuilder, String>> BUILDER_APPLIERS = Map.ofEntries( + entry(KqlQueryBuilder.TIME_ZONE_FIELD.getPreferredName(), KqlQueryBuilder::timeZone), + entry(KqlQueryBuilder.DEFAULT_FIELD_FIELD.getPreferredName(), KqlQueryBuilder::defaultField), + entry(KqlQueryBuilder.CASE_INSENSITIVE_FIELD.getPreferredName(), (qb, s) -> qb.caseInsensitive(Booleans.parseBoolean(s))) + ); + + private final String query; + + private final Map<String, String> options; + + // dedicated constructor for QueryTranslator + public KqlQuery(Source source, String query) { + this(source, query, null); + } + + public KqlQuery(Source source, String query, Map<String, String> options) { + super(source); + this.query = query; + this.options = options == null ? Collections.emptyMap() : options; + } + + @Override + public QueryBuilder asBuilder() { + final KqlQueryBuilder queryBuilder = new KqlQueryBuilder(query); + options.forEach((k, v) -> { + if (BUILDER_APPLIERS.containsKey(k)) { + BUILDER_APPLIERS.get(k).accept(queryBuilder, v); + } else { + throw new IllegalArgumentException("illegal kql query option [" + k + "]"); + } + }); + return queryBuilder; + } + + public String query() { + return query; + } + + public Map<String, String> options() { + return options; + } + + @Override + public int hashCode() { + return Objects.hash(query, options); + } + + @Override + public boolean equals(Object obj) { + if (false == super.equals(obj)) { + return false; + } + + KqlQuery other = (KqlQuery) obj; + return Objects.equals(query, other.query) && Objects.equals(options, other.options); + } + + @Override + protected String innerToString() { + return query; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index 504689fdac39b..3b0f9ab578df9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.compute.data.Block; @@ -62,6 +63,8 @@ import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; import org.elasticsearch.xpack.esql.plan.logical.UnresolvedRelation; import org.elasticsearch.xpack.esql.plan.logical.join.InlineJoin; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes; +import org.elasticsearch.xpack.esql.plan.logical.join.LookupJoin; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; import
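The new KqlQuery translates ES|QL options into builder calls through a map of appliers, rejecting unknown option names eagerly. A stripped-down sketch of the same dispatch pattern; KqlBuilderSketch is a hypothetical stand-in for the real KqlQueryBuilder:

    import java.util.Map;
    import java.util.function.BiConsumer;

    class KqlOptionsSketch {
        static class KqlBuilderSketch {
            String timeZone;
            String defaultField;
            void timeZone(String tz) { this.timeZone = tz; }
            void defaultField(String f) { this.defaultField = f; }
        }

        private static final Map<String, BiConsumer<KqlBuilderSketch, String>> APPLIERS = Map.of(
            "time_zone", KqlBuilderSketch::timeZone,
            "default_field", KqlBuilderSketch::defaultField
        );

        static KqlBuilderSketch apply(Map<String, String> options) {
            KqlBuilderSketch builder = new KqlBuilderSketch();
            options.forEach((k, v) -> {
                BiConsumer<KqlBuilderSketch, String> applier = APPLIERS.get(k);
                if (applier == null) {
                    throw new IllegalArgumentException("illegal kql query option [" + k + "]"); // fail before building
                }
                applier.accept(builder, v);
            });
            return builder;
        }

        public static void main(String[] args) {
            System.out.println(apply(Map.of("default_field", "message")).defaultField); // message
        }
    }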
org.elasticsearch.xpack.esql.plan.physical.EstimatesRowSize; @@ -76,8 +79,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.function.BiFunction; -import java.util.function.Predicate; import java.util.stream.Collectors; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; @@ -145,6 +146,7 @@ public String sessionId() { * Execute an ESQL request. */ public void execute(EsqlQueryRequest request, EsqlExecutionInfo executionInfo, PlanRunner planRunner, ActionListener listener) { + assert executionInfo != null : "Null EsqlExecutionInfo"; LOGGER.debug("ESQL query:\n{}", request.query()); analyzedPlan( parse(request.query(), request.params()), @@ -272,9 +274,12 @@ public void analyzedPlan(LogicalPlan parsed, EsqlExecutionInfo executionInfo, Ac return; } - preAnalyze(parsed, executionInfo, (indices, policies) -> { + preAnalyze(parsed, executionInfo, (indices, lookupIndices, policies) -> { planningMetrics.gatherPreAnalysisMetrics(parsed); - Analyzer analyzer = new Analyzer(new AnalyzerContext(configuration, functionRegistry, indices, policies), verifier); + Analyzer analyzer = new Analyzer( + new AnalyzerContext(configuration, functionRegistry, indices, lookupIndices, policies), + verifier + ); var plan = analyzer.analyze(parsed); plan.setAnalyzed(); LOGGER.debug("Analyzed plan:\n{}", plan); @@ -285,77 +290,95 @@ public void analyzedPlan(LogicalPlan parsed, EsqlExecutionInfo executionInfo, Ac private void preAnalyze( LogicalPlan parsed, EsqlExecutionInfo executionInfo, - BiFunction action, + TriFunction action, ActionListener listener ) { PreAnalyzer.PreAnalysis preAnalysis = preAnalyzer.preAnalyze(parsed); var unresolvedPolicies = preAnalysis.enriches.stream() .map(e -> new EnrichPolicyResolver.UnresolvedPolicy((String) e.policyName().fold(), e.mode())) .collect(Collectors.toSet()); + final List indices = preAnalysis.indices; + // TODO: make a separate call for lookup indices final Set targetClusters = enrichPolicyResolver.groupIndicesPerCluster( - preAnalysis.indices.stream() - .flatMap(t -> Arrays.stream(Strings.commaDelimitedListToStringArray(t.id().index()))) - .toArray(String[]::new) + indices.stream().flatMap(t -> Arrays.stream(Strings.commaDelimitedListToStringArray(t.id().index()))).toArray(String[]::new) ).keySet(); enrichPolicyResolver.resolvePolicies(targetClusters, unresolvedPolicies, listener.delegateFailureAndWrap((l, enrichResolution) -> { // first we need the match_fields names from enrich policies and THEN, with an updated list of fields, we call field_caps API - var matchFields = enrichResolution.resolvedEnrichPolicies() + var enrichMatchFields = enrichResolution.resolvedEnrichPolicies() .stream() .map(ResolvedEnrichPolicy::matchField) .collect(Collectors.toSet()); - Map unavailableClusters = enrichResolution.getUnavailableClusters(); - preAnalyzeIndices(parsed, executionInfo, unavailableClusters, l.delegateFailureAndWrap((ll, indexResolution) -> { - // TODO in follow-PR (for skip_unavailble handling of missing concrete indexes) add some tests for invalid index - // resolution to updateExecutionInfo - if (indexResolution.isValid()) { - EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); - EsqlSessionCCSUtils.updateExecutionInfoWithUnavailableClusters(executionInfo, indexResolution.getUnavailableClusters()); - if (executionInfo.isCrossClusterSearch() - && executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.RUNNING) == 0) { - // for a CCS, if all clusters 
have been marked as SKIPPED, nothing to search so send a sentinel - // Exception to let the LogicalPlanActionListener decide how to proceed - ll.onFailure(new NoClustersToSearchException()); - return; - } - - Set newClusters = enrichPolicyResolver.groupIndicesPerCluster( - indexResolution.get().concreteIndices().toArray(String[]::new) - ).keySet(); - // If new clusters appear when resolving the main indices, we need to resolve the enrich policies again - // or exclude main concrete indices. Since this is rare, it's simpler to resolve the enrich policies again. - // TODO: add a test for this - if (targetClusters.containsAll(newClusters) == false - // do not bother with a re-resolution if only remotes were requested and all were offline - && executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.RUNNING) > 0) { - enrichPolicyResolver.resolvePolicies( - newClusters, - unresolvedPolicies, - ll.map(newEnrichResolution -> action.apply(indexResolution, newEnrichResolution)) - ); - return; - } - } - ll.onResponse(action.apply(indexResolution, enrichResolution)); - }), matchFields); + // get the field names from the parsed plan combined with the ENRICH match fields from the ENRICH policy + var fieldNames = fieldNames(parsed, enrichMatchFields); + // First resolve the lookup indices, then the main indices + preAnalyzeLookupIndices( + preAnalysis.lookupIndices, + fieldNames, + l.delegateFailureAndWrap( + (lx, lookupIndexResolution) -> preAnalyzeIndices( + indices, + executionInfo, + enrichResolution.getUnavailableClusters(), + fieldNames, + lx.delegateFailureAndWrap((ll, indexResolution) -> { + // TODO in follow-PR (for skip_unavailble handling of missing concrete indexes) add some tests for invalid + // index resolution to updateExecutionInfo + if (indexResolution.isValid()) { + EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + EsqlSessionCCSUtils.updateExecutionInfoWithUnavailableClusters( + executionInfo, + indexResolution.unavailableClusters() + ); + if (executionInfo.isCrossClusterSearch() + && executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.RUNNING) == 0) { + // for a CCS, if all clusters have been marked as SKIPPED, nothing to search so send a sentinel + // Exception to let the LogicalPlanActionListener decide how to proceed + ll.onFailure(new NoClustersToSearchException()); + return; + } + + Set newClusters = enrichPolicyResolver.groupIndicesPerCluster( + indexResolution.get().concreteIndices().toArray(String[]::new) + ).keySet(); + // If new clusters appear when resolving the main indices, we need to resolve the enrich policies again + // or exclude main concrete indices. Since this is rare, it's simpler to resolve the enrich policies + // again. 
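The pre-analysis flow is now a staged async chain: lookup-join indices resolve first, then the main indices, and enrich policies are re-resolved only in the rare case that the main resolution surfaces new clusters. A loose sketch of that ordering, using CompletableFuture in place of ES ActionListener chaining (method names invented):

    import java.util.List;
    import java.util.concurrent.CompletableFuture;

    class PreAnalyzeOrder {
        static CompletableFuture<String> resolveLookupIndices(List<String> lookupIndices) {
            return CompletableFuture.completedFuture("lookup:" + lookupIndices); // a field-caps call in the real code
        }

        static CompletableFuture<String> resolveMainIndices(List<String> indices) {
            return CompletableFuture.completedFuture("main:" + indices);
        }

        public static void main(String[] args) {
            resolveLookupIndices(List.of("languages_lookup"))
                .thenCompose(lookup -> resolveMainIndices(List.of("employees")).thenApply(main -> lookup + " -> " + main))
                .thenAccept(System.out::println); // lookup:[languages_lookup] -> main:[employees]
        }
    }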
+ // TODO: add a test for this + if (targetClusters.containsAll(newClusters) == false + // do not bother with a re-resolution if only remotes were requested and all were offline + && executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.RUNNING) > 0) { + enrichPolicyResolver.resolvePolicies( + newClusters, + unresolvedPolicies, + ll.map( + newEnrichResolution -> action.apply(indexResolution, lookupIndexResolution, newEnrichResolution) + ) + ); + return; + } + } + ll.onResponse(action.apply(indexResolution, lookupIndexResolution, enrichResolution)); + }) + ) + ) + ); })); } private void preAnalyzeIndices( - LogicalPlan parsed, + List indices, EsqlExecutionInfo executionInfo, Map unavailableClusters, // known to be unavailable from the enrich policy API call - ActionListener listener, - Set enrichPolicyMatchFields + Set fieldNames, + ActionListener listener ) { - PreAnalyzer.PreAnalysis preAnalysis = new PreAnalyzer().preAnalyze(parsed); // TODO we plan to support joins in the future when possible, but for now we'll just fail early if we see one - if (preAnalysis.indices.size() > 1) { + if (indices.size() > 1) { // Note: JOINs are not supported but we detect them when listener.onFailure(new MappingException("Queries with multiple indices are not supported")); - } else if (preAnalysis.indices.size() == 1) { - TableInfo tableInfo = preAnalysis.indices.get(0); + } else if (indices.size() == 1) { + TableInfo tableInfo = indices.get(0); TableIdentifier table = tableInfo.id(); - var fieldNames = fieldNames(parsed, enrichPolicyMatchFields); Map clusterIndices = indicesExpressionGrouper.groupIndices(IndicesOptions.DEFAULT, table.index()); for (Map.Entry entry : clusterIndices.entrySet()) { @@ -401,6 +424,25 @@ private void preAnalyzeIndices( } } + private void preAnalyzeLookupIndices(List indices, Set fieldNames, ActionListener listener) { + if (indices.size() > 1) { + // Note: JOINs on more than one index are not yet supported + listener.onFailure(new MappingException("More than one LOOKUP JOIN is not supported")); + } else if (indices.size() == 1) { + TableInfo tableInfo = indices.get(0); + TableIdentifier table = tableInfo.id(); + // call the EsqlResolveFieldsAction (field-caps) to resolve indices and get field types + indexResolver.resolveAsMergedMapping(table.index(), fieldNames, listener); + } else { + try { + // No lookup indices specified + listener.onResponse(IndexResolution.invalid("[none specified]")); + } catch (Exception ex) { + listener.onFailure(ex); + } + } + } + static Set fieldNames(LogicalPlan parsed, Set enrichPolicyMatchFields) { if (false == parsed.anyMatch(plan -> plan instanceof Aggregate || plan instanceof Project)) { // no explicit columns selection, for example "from employees" @@ -422,8 +464,7 @@ static Set fieldNames(LogicalPlan parsed, Set enrichPolicyMatchF // "keep" attributes are special whenever a wildcard is used in their name // ie "from test | eval lang = languages + 1 | keep *l" should consider both "languages" and "*l" as valid fields to ask for AttributeSet keepCommandReferences = new AttributeSet(); - List> keepMatches = new ArrayList<>(); - List keepPatterns = new ArrayList<>(); + AttributeSet keepJoinReferences = new AttributeSet(); parsed.forEachDown(p -> {// go over each plan top-down if (p instanceof RegexExtract re) { // for Grok and Dissect @@ -440,6 +481,11 @@ static Set fieldNames(LogicalPlan parsed, Set enrichPolicyMatchF // The exact name of the field will be added later as part of enrichPolicyMatchFields Set enrichRefs.removeIf(attr 
-> attr instanceof EmptyAttribute); references.addAll(enrichRefs); + } else if (p instanceof LookupJoin join) { + keepJoinReferences.addAll(join.config().matchFields()); // TODO: why is this empty + if (join.config().type() instanceof JoinTypes.UsingJoinType usingJoinType) { + keepJoinReferences.addAll(usingJoinType.columns()); + } } else { references.addAll(p.references()); if (p instanceof UnresolvedRelation ur && ur.indexMode() == IndexMode.TIME_SERIES) { @@ -452,7 +498,6 @@ static Set fieldNames(LogicalPlan parsed, Set enrichPolicyMatchF references.add(ua); if (p instanceof Keep) { keepCommandReferences.add(ua); - keepMatches.add(up::match); } }); if (p instanceof Keep) { @@ -463,7 +508,7 @@ static Set fieldNames(LogicalPlan parsed, Set enrichPolicyMatchF // remove any already discovered UnresolvedAttributes that are in fact aliases defined later down in the tree // for example "from test | eval x = salary | stats max = max(x) by gender" // remove the UnresolvedAttribute "x", since that is an Alias defined in "eval" - AttributeSet planRefs = Expressions.references(p.expressions()); + AttributeSet planRefs = p.references(); p.forEachExpressionDown(Alias.class, alias -> { // do not remove the UnresolvedAttribute that has the same name as its alias, ie "rename id = id" // or the UnresolvedAttributes that are used in Functions that have aliases "STATS id = MAX(id)" @@ -473,6 +518,8 @@ static Set fieldNames(LogicalPlan parsed, Set enrichPolicyMatchF references.removeIf(attr -> matchByName(attr, alias.name(), keepCommandReferences.contains(attr))); }); }); + // Add JOIN ON column references afterward to avoid Alias removal + references.addAll(keepJoinReferences); // remove valid metadata attributes because they will be filtered out by the IndexResolver anyway // otherwise, in some edge cases, we will fail to ask for "*" (all fields) instead diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java index 80709d8f6c4f7..4fe2fef7e3f45 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java @@ -17,6 +17,7 @@ import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.analysis.Analyzer; import org.elasticsearch.xpack.esql.index.IndexResolution; @@ -33,7 +34,6 @@ class EsqlSessionCCSUtils { private EsqlSessionCCSUtils() {} - // visible for testing static Map determineUnavailableRemoteClusters(List failures) { Map unavailableRemotes = new HashMap<>(); for (FieldCapabilitiesFailure failure : failures) { @@ -75,10 +75,10 @@ public void onFailure(Exception e) { /** * Whether to return an empty result (HTTP status 200) for a CCS rather than a top level 4xx/5xx error. - * + *
+     * <p>
    * For cases where field-caps had no indices to search and the remotes were unavailable, we * return an empty successful response (200) if all remotes are marked with skip_unavailable=true. - * + *
+     * <p>
    * Note: a follow-on PR will expand this logic to handle cases where no indices could be found to match * on any of the requested clusters. */ @@ -132,7 +132,6 @@ static void updateExecutionInfoToReturnEmptyResult(EsqlExecutionInfo executionIn } } - // visible for testing static String createIndexExpressionFromAvailableClusters(EsqlExecutionInfo executionInfo) { StringBuilder sb = new StringBuilder(); for (String clusterAlias : executionInfo.clusterAliases()) { @@ -181,39 +180,91 @@ static void updateExecutionInfoWithUnavailableClusters(EsqlExecutionInfo execInf } } - // visible for testing static void updateExecutionInfoWithClustersWithNoMatchingIndices(EsqlExecutionInfo executionInfo, IndexResolution indexResolution) { Set<String> clustersWithResolvedIndices = new HashSet<>(); // determine missing clusters - for (String indexName : indexResolution.get().indexNameWithModes().keySet()) { + for (String indexName : indexResolution.resolvedIndices()) { clustersWithResolvedIndices.add(RemoteClusterAware.parseClusterAlias(indexName)); } Set<String> clustersRequested = executionInfo.clusterAliases(); Set<String> clustersWithNoMatchingIndices = Sets.difference(clustersRequested, clustersWithResolvedIndices); - clustersWithNoMatchingIndices.removeAll(indexResolution.getUnavailableClusters().keySet()); + clustersWithNoMatchingIndices.removeAll(indexResolution.unavailableClusters().keySet()); + + /** + * Rules enforced at planning time around non-matching indices + * P1. fail query if no matching indices on any cluster (VerificationException) - that is handled elsewhere (TODO: document where) + * P2. fail query if a skip_unavailable:false cluster has no matching indices (the local cluster already has this rule + * enforced at planning time) + * P3. fail query if the local cluster has no matching indices and a concrete index was specified + */ + String fatalErrorMessage = null; /* * These are clusters in the original request that are not present in the field-caps response. They were - * specified with an index or indices that do not exist, so the search on that cluster is done. + * specified with an index expression that matched no indices, so the search on that cluster is done. * Mark it as SKIPPED with 0 shards searched and took=0. */ for (String c : clustersWithNoMatchingIndices) { - // TODO: in a follow-on PR, throw a Verification(400 status code) for local and remotes with skip_unavailable=false if - // they were requested with one or more concrete indices - // for now we never mark the local cluster as SKIPPED - final var status = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(c) - ? EsqlExecutionInfo.Cluster.Status.SUCCESSFUL - : EsqlExecutionInfo.Cluster.Status.SKIPPED; - executionInfo.swapCluster( - c, - (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setStatus(status) - .setTook(new TimeValue(0)) - .setTotalShards(0) - .setSuccessfulShards(0) - .setSkippedShards(0) - .setFailedShards(0) - .build() - ); + final String indexExpression = executionInfo.getCluster(c).getIndexExpression(); + if (missingIndicesIsFatal(c, executionInfo)) { + String error = Strings.format( + "Unknown index [%s]", + (c.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) ?
indexExpression : c + ":" + indexExpression) + ); + if (fatalErrorMessage == null) { + fatalErrorMessage = error; + } else { + fatalErrorMessage += "; " + error; + } + } else { + // handles local cluster (when no concrete indices requested) and skip_unavailable=true clusters + EsqlExecutionInfo.Cluster.Status status; + ShardSearchFailure failure; + if (c.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { + status = EsqlExecutionInfo.Cluster.Status.SUCCESSFUL; + failure = null; + } else { + status = EsqlExecutionInfo.Cluster.Status.SKIPPED; + failure = new ShardSearchFailure(new VerificationException("Unknown index [" + indexExpression + "]")); + } + executionInfo.swapCluster(c, (k, v) -> { + var builder = new EsqlExecutionInfo.Cluster.Builder(v).setStatus(status) + .setTook(new TimeValue(0)) + .setTotalShards(0) + .setSuccessfulShards(0) + .setSkippedShards(0) + .setFailedShards(0); + if (failure != null) { + builder.setFailures(List.of(failure)); + } + return builder.build(); + }); + } } + if (fatalErrorMessage != null) { + throw new VerificationException(fatalErrorMessage); + } + } + + // visible for testing + static boolean missingIndicesIsFatal(String clusterAlias, EsqlExecutionInfo executionInfo) { + // missing indices on local cluster is fatal only if a concrete index requested + if (clusterAlias.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { + return concreteIndexRequested(executionInfo.getCluster(clusterAlias).getIndexExpression()); + } + return executionInfo.getCluster(clusterAlias).isSkipUnavailable() == false; + } + + private static boolean concreteIndexRequested(String indexExpression) { + for (String expr : indexExpression.split(",")) { + if (expr.charAt(0) == '<' || expr.startsWith("-<")) { + // skip date math expressions + continue; + } + if (expr.indexOf('*') < 0) { + return true; + } + } + return false; } // visible for testing diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java index 210f991306bac..f61be4b59830e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.session; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponse; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; @@ -97,9 +98,8 @@ public IndexResolution mergedMappings(String indexPattern, FieldCapabilitiesResp // TODO flattened is simpler - could we get away with that? 
String[] names = fieldsCaps.keySet().toArray(new String[0]); Arrays.sort(names); - Set forbiddenFields = new HashSet<>(); Map rootFields = new HashMap<>(); - name: for (String name : names) { + for (String name : names) { Map fields = rootFields; String fullName = name; boolean isAlias = false; @@ -110,9 +110,6 @@ public IndexResolution mergedMappings(String indexPattern, FieldCapabilitiesResp break; } String parent = name.substring(0, nextDot); - if (forbiddenFields.contains(parent)) { - continue name; - } EsField obj = fields.get(parent); if (obj == null) { obj = new EsField(parent, OBJECT, new HashMap<>(), false, true); @@ -124,16 +121,10 @@ public IndexResolution mergedMappings(String indexPattern, FieldCapabilitiesResp fields = obj.getProperties(); name = name.substring(nextDot + 1); } - - List caps = fieldsCaps.get(fullName); - if (allNested(caps)) { - forbiddenFields.add(name); - continue; - } // TODO we're careful to make isAlias match IndexResolver - but do we use it? EsField field = firstUnsupportedParent == null - ? createField(fieldCapsResponse, name, fullName, caps, isAlias) + ? createField(fieldCapsResponse, name, fullName, fieldsCaps.get(fullName), isAlias) : new UnsupportedEsField( fullName, firstUnsupportedParent.getOriginalType(), @@ -143,30 +134,24 @@ public IndexResolution mergedMappings(String indexPattern, FieldCapabilitiesResp fields.put(name, field); } - boolean allEmpty = true; - for (FieldCapabilitiesIndexResponse ir : fieldCapsResponse.getIndexResponses()) { - allEmpty &= ir.get().isEmpty(); - } - if (allEmpty) { - // If all the mappings are empty we return an empty set of resolved indices to line up with QL - return IndexResolution.valid(new EsIndex(indexPattern, rootFields, Map.of())); - } + Map unavailableRemotes = EsqlSessionCCSUtils.determineUnavailableRemoteClusters( + fieldCapsResponse.getFailures() + ); Map concreteIndices = Maps.newMapWithExpectedSize(fieldCapsResponse.getIndexResponses().size()); for (FieldCapabilitiesIndexResponse ir : fieldCapsResponse.getIndexResponses()) { concreteIndices.put(ir.getIndexName(), ir.getIndexMode()); } - EsIndex esIndex = new EsIndex(indexPattern, rootFields, concreteIndices); - return IndexResolution.valid(esIndex, EsqlSessionCCSUtils.determineUnavailableRemoteClusters(fieldCapsResponse.getFailures())); - } - private boolean allNested(List caps) { - for (IndexFieldCapabilities cap : caps) { - if (false == cap.type().equalsIgnoreCase("nested")) { - return false; - } + boolean allEmpty = true; + for (FieldCapabilitiesIndexResponse ir : fieldCapsResponse.getIndexResponses()) { + allEmpty &= ir.get().isEmpty(); + } + if (allEmpty) { + // If all the mappings are empty we return an empty set of resolved indices to line up with QL + return IndexResolution.valid(new EsIndex(indexPattern, rootFields, Map.of()), concreteIndices.keySet(), unavailableRemotes); } - return true; + return IndexResolution.valid(new EsIndex(indexPattern, rootFields, concreteIndices), concreteIndices.keySet(), unavailableRemotes); } private static Map> collectFieldCaps(FieldCapabilitiesResponse fieldCapsResponse) { @@ -274,6 +259,8 @@ private static FieldCapabilitiesRequest createFieldCapsRequest(String index, Set // lenient because we throw our own errors looking at the response e.g. 
if something was not resolved // also because this way security doesn't throw authorization exceptions but rather honors ignore_unavailable req.indicesOptions(FIELD_CAPS_INDICES_OPTIONS); + // we ignore the nested data type fields starting with https://github.com/elastic/elasticsearch/pull/111495 + req.filters("-nested"); req.setMergeResults(false); return req; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java index c9c292769b570..4bfc9ac5d848f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java @@ -274,27 +274,11 @@ public static TemporalAmount parseTemporalAmount(Object val, DataType expectedTy return null; } StringBuilder value = new StringBuilder(); - StringBuilder qualifier = new StringBuilder(); - StringBuilder nextBuffer = value; - boolean lastWasSpace = false; - for (char c : str.trim().toCharArray()) { - if (c == ' ') { - if (lastWasSpace == false) { - nextBuffer = nextBuffer == value ? qualifier : null; - } - lastWasSpace = true; - continue; - } - if (nextBuffer == null) { - throw new ParsingException(Source.EMPTY, errorMessage, val, expectedType); - } - nextBuffer.append(c); - lastWasSpace = false; - } - - if ((value.isEmpty() || qualifier.isEmpty()) == false) { + StringBuilder temporalUnit = new StringBuilder(); + separateValueAndTemporalUnitForTemporalAmount(str.strip(), value, temporalUnit, errorMessage, expectedType.toString()); + if ((value.isEmpty() || temporalUnit.isEmpty()) == false) { try { - TemporalAmount result = parseTemporalAmount(Integer.parseInt(value.toString()), qualifier.toString(), Source.EMPTY); + TemporalAmount result = parseTemporalAmount(Integer.parseInt(value.toString()), temporalUnit.toString(), Source.EMPTY); if (DataType.DATE_PERIOD == expectedType && result instanceof Period || DataType.TIME_DURATION == expectedType && result instanceof Duration) { return result; @@ -312,6 +296,48 @@ public static TemporalAmount parseTemporalAmount(Object val, DataType expectedTy throw new ParsingException(Source.EMPTY, errorMessage, val, expectedType); } + public static TemporalAmount maybeParseTemporalAmount(String str) { + // The string literal can be either Date_Period or Time_Duration, derive the data type from its temporal unit + String errorMessage = "Cannot parse [{}] to {}"; + String expectedTypes = DATE_PERIOD + " or " + TIME_DURATION; + StringBuilder value = new StringBuilder(); + StringBuilder temporalUnit = new StringBuilder(); + separateValueAndTemporalUnitForTemporalAmount(str, value, temporalUnit, errorMessage, expectedTypes); + if ((value.isEmpty() || temporalUnit.isEmpty()) == false) { + try { + return parseTemporalAmount(Integer.parseInt(value.toString()), temporalUnit.toString(), Source.EMPTY); + } catch (NumberFormatException ex) { + throw new ParsingException(Source.EMPTY, errorMessage, str, expectedTypes); + } + } + return null; + } + + private static void separateValueAndTemporalUnitForTemporalAmount( + String temporalAmount, + StringBuilder value, + StringBuilder temporalUnit, + String errorMessage, + String expectedType + ) { + StringBuilder nextBuffer = value; + boolean lastWasSpace = false; + for (char c : temporalAmount.toCharArray()) { + if (c == ' ') { + if (lastWasSpace == false) { + nextBuffer = nextBuffer == value ? 
temporalUnit : null; + } + lastWasSpace = true; + continue; + } + if (nextBuffer == null) { + throw new ParsingException(Source.EMPTY, errorMessage, temporalAmount, expectedType); + } + nextBuffer.append(c); + lastWasSpace = false; + } + } + /** * Converts arbitrary object to the desired data type. *

    @@ -394,10 +420,10 @@ public static DataType commonType(DataType left, DataType right) { } // generally supporting abbreviations from https://en.wikipedia.org/wiki/Unit_of_time - public static TemporalAmount parseTemporalAmount(Number value, String qualifier, Source source) throws InvalidArgumentException, + public static TemporalAmount parseTemporalAmount(Number value, String temporalUnit, Source source) throws InvalidArgumentException, ArithmeticException, ParsingException { try { - return switch (INTERVALS.valueOf(qualifier.toUpperCase(Locale.ROOT))) { + return switch (INTERVALS.valueOf(temporalUnit.toUpperCase(Locale.ROOT))) { case MILLISECOND, MILLISECONDS, MS -> Duration.ofMillis(safeToLong(value)); case SECOND, SECONDS, SEC, S -> Duration.ofSeconds(safeToLong(value)); case MINUTE, MINUTES, MIN -> Duration.ofMinutes(safeToLong(value)); @@ -410,7 +436,7 @@ public static TemporalAmount parseTemporalAmount(Number value, String qualifier, case YEAR, YEARS, YR, Y -> Period.ofYears(safeToInt(safeToLong(value))); }; } catch (IllegalArgumentException e) { - throw new ParsingException(source, "Unexpected time interval qualifier: '{}'", qualifier); + throw new ParsingException(source, "Unexpected temporal unit: '{}'", temporalUnit); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 348ca4acd100e..df974a88a4c57 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -58,6 +58,7 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; +import org.elasticsearch.xpack.esql.enrich.LookupFromIndexService; import org.elasticsearch.xpack.esql.enrich.ResolvedEnrichPolicy; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.index.EsIndex; @@ -235,7 +236,10 @@ public final void test() throws Throwable { * are tested in integration tests. */ assumeFalse("metadata fields aren't supported", testCase.requiredCapabilities.contains(cap(EsqlFeatures.METADATA_FIELDS))); - assumeFalse("enrich can't load fields in csv tests", testCase.requiredCapabilities.contains(cap(EsqlFeatures.ENRICH_LOAD))); + assumeFalse( + "enrich can't load fields in csv tests", + testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.ENRICH_LOAD.capabilityName()) + ); assumeFalse( "can't use match in csv tests", testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.MATCH_OPERATOR_COLON.capabilityName()) @@ -253,7 +257,14 @@ public final void test() throws Throwable { "can't use MATCH function in csv tests", testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.MATCH_FUNCTION.capabilityName()) ); - + assumeFalse( + "can't use KQL function in csv tests", + testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.KQL_FUNCTION.capabilityName()) + ); + assumeFalse( + "lookup join disabled for csv tests", + testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.JOIN_LOOKUP_V3.capabilityName()) + ); if (Build.current().isSnapshot()) { assertThat( "Capability is not included in the enabled list capabilities on a snapshot build. 
Spelling mistake?", @@ -528,7 +539,7 @@ void executeSubPlan( bigArrays, ByteSizeValue.ofBytes(randomLongBetween(1, BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE.getBytes() * 2)) ); - ExchangeSourceHandler exchangeSource = new ExchangeSourceHandler(between(1, 64), executor); + ExchangeSourceHandler exchangeSource = new ExchangeSourceHandler(between(1, 64), executor, ActionListener.noop()); ExchangeSinkHandler exchangeSink = new ExchangeSinkHandler(blockFactory, between(1, 64), threadPool::relativeTimeInMillis); LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner( @@ -542,6 +553,7 @@ void executeSubPlan( exchangeSource, exchangeSink, Mockito.mock(EnrichLookupService.class), + Mockito.mock(LookupFromIndexService.class), physicalOperationProviders ); @@ -557,7 +569,14 @@ void executeSubPlan( var physicalTestOptimizer = new TestLocalPhysicalPlanOptimizer(new LocalPhysicalOptimizerContext(configuration, searchStats)); var csvDataNodePhysicalPlan = PlannerUtils.localPlan(dataNodePlan, logicalTestOptimizer, physicalTestOptimizer); - exchangeSource.addRemoteSink(exchangeSink::fetchPageAsync, randomIntBetween(1, 3)); + exchangeSource.addRemoteSink( + exchangeSink::fetchPageAsync, + Randomness.get().nextBoolean(), + randomIntBetween(1, 3), + ActionListener.noop().delegateResponse((l, e) -> { + throw new AssertionError("expected no failure", e); + }) + ); LocalExecutionPlan dataNodeExecutionPlan = executionPlanner.plan(csvDataNodePhysicalPlan); drivers.addAll(dataNodeExecutionPlan.createDrivers(getTestName())); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java index 0a9e8b1b90681..9c24ec96dddf8 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockWritables; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.ExistsQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; @@ -24,15 +24,11 @@ import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.index.query.WildcardQueryBuilder; import org.elasticsearch.test.EqualsHashCodeTestUtils; -import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; -import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; -import org.elasticsearch.xpack.esql.expression.function.fulltext.FullTextFunction; -import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; +import org.elasticsearch.xpack.esql.expression.ExpressionWritables; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import org.elasticsearch.xpack.esql.plan.PlanWritables; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import 
org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; @@ -115,17 +111,9 @@ public static NamedWriteableRegistry writableRegistry() { entries.add(new NamedWriteableRegistry.Entry(QueryBuilder.class, RegexpQueryBuilder.NAME, RegexpQueryBuilder::new)); entries.add(new NamedWriteableRegistry.Entry(QueryBuilder.class, ExistsQueryBuilder.NAME, ExistsQueryBuilder::new)); entries.add(SingleValueQuery.ENTRY); - entries.addAll(Attribute.getNamedWriteables()); - entries.add(UnsupportedAttribute.ENTRY); - entries.addAll(NamedExpression.getNamedWriteables()); - entries.add(UnsupportedAttribute.NAMED_EXPRESSION_ENTRY); - entries.addAll(Expression.getNamedWriteables()); - entries.addAll(EsqlScalarFunction.getNamedWriteables()); - entries.addAll(AggregateFunction.getNamedWriteables()); - entries.addAll(Block.getNamedWriteables()); - entries.addAll(LogicalPlan.getNamedWriteables()); - entries.addAll(PhysicalPlan.getNamedWriteables()); - entries.addAll(FullTextFunction.getNamedWriteables()); + entries.addAll(ExpressionWritables.getNamedWriteables()); + entries.addAll(PlanWritables.getNamedWriteables()); + entries.addAll(BlockWritables.getNamedWriteables()); return new NamedWriteableRegistry(entries); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java index 2f3aa09868637..134981d3c3b0c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java @@ -9,7 +9,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockWritables; import org.elasticsearch.compute.operator.AbstractPageMappingOperator; import org.elasticsearch.compute.operator.DriverProfile; import org.elasticsearch.compute.operator.DriverSleeps; @@ -39,7 +39,7 @@ protected EsqlQueryResponse.Profile mutateInstance(EsqlQueryResponse.Profile ins @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( - Stream.concat(Stream.of(AbstractPageMappingOperator.Status.ENTRY), Block.getNamedWriteables().stream()).toList() + Stream.concat(Stream.of(AbstractPageMappingOperator.Status.ENTRY), BlockWritables.getNamedWriteables().stream()).toList() ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index 4aaf4f6cccf0f..35364089127cc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.data.BlockWritables; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.DoubleBlock; @@ -99,7 +100,7 @@ public void blockFactoryEmpty() { @Override protected 
NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( - Stream.concat(Stream.of(AbstractPageMappingOperator.Status.ENTRY), Block.getNamedWriteables().stream()).toList() + Stream.concat(Stream.of(AbstractPageMappingOperator.Status.ENTRY), BlockWritables.getNamedWriteables().stream()).toList() ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index c1b2adddfc838..e0ebc92afa95d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -1909,11 +1909,11 @@ public void testLookup() { String query = """ FROM test | RENAME languages AS int - | LOOKUP int_number_names ON int + | LOOKUP_🐔 int_number_names ON int """; if (Build.current().isSnapshot() == false) { var e = expectThrows(ParsingException.class, () -> analyze(query)); - assertThat(e.getMessage(), containsString("line 3:3: mismatched input 'LOOKUP' expecting {")); + assertThat(e.getMessage(), containsString("line 3:3: mismatched input 'LOOKUP_🐔' expecting {")); return; } LogicalPlan plan = analyze(query); @@ -1953,10 +1953,7 @@ public void testLookup() { .item(startsWith("long_noidx{f}")) .item(startsWith("salary{f}")) /* - * It's important that name is returned as a *reference* here - * instead of a field. If it were a field we'd use SearchStats - * on it and discover that it doesn't exist in the index. It doesn't! - * We don't expect it to. It exists only in the lookup table. + * As is the name column from the right side. */ .item(containsString("name{f}")) ); @@ -1965,11 +1962,11 @@ public void testLookup() { public void testLookupMissingField() { String query = """ FROM test - | LOOKUP int_number_names ON garbage + | LOOKUP_🐔 int_number_names ON garbage """; if (Build.current().isSnapshot() == false) { var e = expectThrows(ParsingException.class, () -> analyze(query)); - assertThat(e.getMessage(), containsString("line 2:3: mismatched input 'LOOKUP' expecting {")); + assertThat(e.getMessage(), containsString("line 2:3: mismatched input 'LOOKUP_🐔' expecting {")); return; } var e = expectThrows(VerificationException.class, () -> analyze(query)); @@ -1979,11 +1976,11 @@ public void testLookupMissingField() { public void testLookupMissingTable() { String query = """ FROM test - | LOOKUP garbage ON a + | LOOKUP_🐔 garbage ON a """; if (Build.current().isSnapshot() == false) { var e = expectThrows(ParsingException.class, () -> analyze(query)); - assertThat(e.getMessage(), containsString("line 2:3: mismatched input 'LOOKUP' expecting {")); + assertThat(e.getMessage(), containsString("line 2:3: mismatched input 'LOOKUP_🐔' expecting {")); return; } var e = expectThrows(VerificationException.class, () -> analyze(query)); @@ -1994,11 +1991,11 @@ public void testLookupMatchTypeWrong() { String query = """ FROM test | RENAME last_name AS int - | LOOKUP int_number_names ON int + | LOOKUP_🐔 int_number_names ON int """; if (Build.current().isSnapshot() == false) { var e = expectThrows(ParsingException.class, () -> analyze(query)); - assertThat(e.getMessage(), containsString("line 3:3: mismatched input 'LOOKUP' expecting {")); + assertThat(e.getMessage(), containsString("line 3:3: mismatched input 'LOOKUP_🐔' expecting {")); return; } var e = expectThrows(VerificationException.class, () -> analyze(query)); @@ -2321,8 +2318,6 @@ 
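On the LOOKUP_🐔 renames above and below: the dev-only command keyword was renamed, presumably to keep the plain LOOKUP token free, and it remains snapshot-only, so each test first handles release builds where the parser rejects the token outright. A condensed sketch of that gating pattern (query text and helper names taken from the surrounding tests):

String query = """
    FROM test
    | LOOKUP_🐔 int_number_names ON int
    """;
if (Build.current().isSnapshot() == false) {
    // Release builds don't have the keyword in the grammar at all,
    // so parsing fails before analysis ever runs.
    var e = expectThrows(ParsingException.class, () -> analyze(query));
    assertThat(e.getMessage(), containsString("mismatched input 'LOOKUP_🐔'"));
} else {
    // Snapshot builds parse it, and the resolved plan can be inspected.
    LogicalPlan plan = analyze(query);
}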
public void testInvalidNamedParamsForIdentifierPatterns() { } public void testFromEnrichAndMatchColonUsage() { - assumeTrue("Match operator is available just for snapshots", EsqlCapabilities.Cap.MATCH_OPERATOR_COLON.isEnabled()); - LogicalPlan plan = analyze(""" from *:test | EVAL x = to_string(languages) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 0a34d6cd848bb..d4fca2a0a2540 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.action.EsqlCapabilities; +import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; @@ -21,6 +22,7 @@ import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.parser.QueryParam; import org.elasticsearch.xpack.esql.parser.QueryParams; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import java.util.ArrayList; import java.util.LinkedHashMap; @@ -195,13 +197,13 @@ public void testUnsupportedAndMultiTypedFields() { if (EsqlCapabilities.Cap.LOOKUP_V4.isEnabled()) { // LOOKUP with unsupported type assertEquals( - "1:41: column type mismatch, table column was [integer] and original column was [unsupported]", - error("from test* | lookup int_number_names on int", analyzer) + "1:43: column type mismatch, table column was [integer] and original column was [unsupported]", + error("from test* | lookup_🐔 int_number_names on int", analyzer) ); // LOOKUP with multi-typed field assertEquals( - "1:44: column type mismatch, table column was [double] and original column was [unsupported]", - error("from test* | lookup double_number_names on double", analyzer) + "1:46: column type mismatch, table column was [double] and original column was [unsupported]", + error("from test* | lookup_🐔 double_number_names on double", analyzer) ); } @@ -404,6 +406,11 @@ public void testAggFilterOnBucketingOrAggFunctions() { query("from test | stats max(languages) WHERE bucket(salary, 10) > 1 by bucket(salary, 10)"); // but fails if it's different + assertEquals( + "1:32: can only use grouping function [bucket(a, 3)] part of the BY clause", + error("row a = 1 | stats sum(a) where bucket(a, 3) > -1 by bucket(a,2)") + ); + assertEquals( "1:40: can only use grouping function [bucket(salary, 10)] part of the BY clause", error("from test | stats max(languages) WHERE bucket(salary, 10) > 1 by emp_no") @@ -771,40 +778,40 @@ public void testWrongInputParam() { public void testPeriodAndDurationInRowAssignment() { for (var unit : TIME_DURATIONS) { - assertEquals("1:5: cannot use [1 " + unit + "] directly in a row assignment", error("row a = 1 " + unit)); + assertEquals("1:9: cannot use [1 " + unit + "] directly in a row assignment", error("row a = 1 " + unit)); assertEquals( - "1:5: cannot use [1 " + unit + "::time_duration] directly in a row assignment", + "1:9: cannot use [1 " + unit + "::time_duration] directly in a row assignment", error("row a = 1 " + unit + "::time_duration") ); assertEquals( - "1:5: cannot use [\"1 " + unit + 
"\"::time_duration] directly in a row assignment", + "1:9: cannot use [\"1 " + unit + "\"::time_duration] directly in a row assignment", error("row a = \"1 " + unit + "\"::time_duration") ); assertEquals( - "1:5: cannot use [to_timeduration(1 " + unit + ")] directly in a row assignment", + "1:9: cannot use [to_timeduration(1 " + unit + ")] directly in a row assignment", error("row a = to_timeduration(1 " + unit + ")") ); assertEquals( - "1:5: cannot use [to_timeduration(\"1 " + unit + "\")] directly in a row assignment", + "1:9: cannot use [to_timeduration(\"1 " + unit + "\")] directly in a row assignment", error("row a = to_timeduration(\"1 " + unit + "\")") ); } for (var unit : DATE_PERIODS) { - assertEquals("1:5: cannot use [1 " + unit + "] directly in a row assignment", error("row a = 1 " + unit)); + assertEquals("1:9: cannot use [1 " + unit + "] directly in a row assignment", error("row a = 1 " + unit)); assertEquals( - "1:5: cannot use [1 " + unit + "::date_period] directly in a row assignment", + "1:9: cannot use [1 " + unit + "::date_period] directly in a row assignment", error("row a = 1 " + unit + "::date_period") ); assertEquals( - "1:5: cannot use [\"1 " + unit + "\"::date_period] directly in a row assignment", + "1:9: cannot use [\"1 " + unit + "\"::date_period] directly in a row assignment", error("row a = \"1 " + unit + "\"::date_period") ); assertEquals( - "1:5: cannot use [to_dateperiod(1 " + unit + ")] directly in a row assignment", + "1:9: cannot use [to_dateperiod(1 " + unit + ")] directly in a row assignment", error("row a = to_dateperiod(1 " + unit + ")") ); assertEquals( - "1:5: cannot use [to_dateperiod(\"1 " + unit + "\")] directly in a row assignment", + "1:9: cannot use [to_dateperiod(\"1 " + unit + "\")] directly in a row assignment", error("row a = to_dateperiod(\"1 " + unit + "\")") ); } @@ -1159,8 +1166,6 @@ public void testMatchInsideEval() throws Exception { } public void testMatchFilter() throws Exception { - assumeTrue("Match operator is available just for snapshots", EsqlCapabilities.Cap.MATCH_OPERATOR_COLON.isEnabled()); - assertEquals( "1:19: first argument of [salary:\"100\"] must be [string], found value [salary] type [integer]", error("from test | where salary:\"100\"") @@ -1183,10 +1188,13 @@ public void testMatchFunctionNotAllowedAfterCommands() throws Exception { "1:24: [MATCH] function cannot be used after LIMIT", error("from test | limit 10 | where match(first_name, \"Anna\")") ); + assertEquals( + "1:47: [MATCH] function cannot be used after STATS", + error("from test | STATS c = AVG(salary) BY gender | where match(gender, \"F\")") + ); } public void testMatchFunctionAndOperatorHaveCorrectErrorMessages() throws Exception { - assumeTrue("skipping because MATCH operator is not enabled", EsqlCapabilities.Cap.MATCH_OPERATOR_COLON.isEnabled()); assertEquals( "1:24: [MATCH] function cannot be used after LIMIT", error("from test | limit 10 | where match(first_name, \"Anna\")") @@ -1257,17 +1265,79 @@ public void testQueryStringFunctionsNotAllowedAfterCommands() throws Exception { ); } + public void testKqlFunctionsNotAllowedAfterCommands() throws Exception { + // Skip test if the kql function is not enabled. 
+ assumeTrue("kql function capability not available", EsqlCapabilities.Cap.KQL_FUNCTION.isEnabled()); + + // Source commands + assertEquals("1:13: [KQL] function cannot be used after SHOW", error("show info | where kql(\"8.16.0\")")); + assertEquals("1:17: [KQL] function cannot be used after ROW", error("row a= \"Anna\" | where kql(\"Anna\")")); + + // Processing commands + assertEquals( + "1:43: [KQL] function cannot be used after DISSECT", + error("from test | dissect first_name \"%{foo}\" | where kql(\"Connection\")") + ); + assertEquals("1:27: [KQL] function cannot be used after DROP", error("from test | drop emp_no | where kql(\"Anna\")")); + assertEquals( + "1:71: [KQL] function cannot be used after ENRICH", + error("from test | enrich languages on languages with lang = language_name | where kql(\"Anna\")") + ); + assertEquals("1:26: [KQL] function cannot be used after EVAL", error("from test | eval z = 2 | where kql(\"Anna\")")); + assertEquals( + "1:44: [KQL] function cannot be used after GROK", + error("from test | grok last_name \"%{WORD:foo}\" | where kql(\"Anna\")") + ); + assertEquals("1:27: [KQL] function cannot be used after KEEP", error("from test | keep emp_no | where kql(\"Anna\")")); + assertEquals("1:24: [KQL] function cannot be used after LIMIT", error("from test | limit 10 | where kql(\"Anna\")")); + assertEquals("1:35: [KQL] function cannot be used after MV_EXPAND", error("from test | mv_expand last_name | where kql(\"Anna\")")); + assertEquals( + "1:45: [KQL] function cannot be used after RENAME", + error("from test | rename last_name as full_name | where kql(\"Anna\")") + ); + assertEquals( + "1:52: [KQL] function cannot be used after STATS", + error("from test | STATS c = COUNT(emp_no) BY languages | where kql(\"Anna\")") + ); + + // Some combination of processing commands + assertEquals("1:38: [KQL] function cannot be used after LIMIT", error("from test | keep emp_no | limit 10 | where kql(\"Anna\")")); + assertEquals( + "1:46: [KQL] function cannot be used after MV_EXPAND", + error("from test | limit 10 | mv_expand last_name | where kql(\"Anna\")") + ); + assertEquals( + "1:52: [KQL] function cannot be used after KEEP", + error("from test | mv_expand last_name | keep last_name | where kql(\"Anna\")") + ); + assertEquals( + "1:77: [KQL] function cannot be used after RENAME", + error("from test | STATS c = COUNT(emp_no) BY languages | rename c as total_emps | where kql(\"Anna\")") + ); + assertEquals( + "1:54: [KQL] function cannot be used after DROP", + error("from test | rename last_name as name | drop emp_no | where kql(\"Anna\")") + ); + } + public void testQueryStringFunctionOnlyAllowedInWhere() throws Exception { assertEquals("1:9: [QSTR] function is only supported in WHERE commands", error("row a = qstr(\"Anna\")")); checkFullTextFunctionsOnlyAllowedInWhere("QSTR", "qstr(\"Anna\")", "function"); } + public void testKqlFunctionOnlyAllowedInWhere() throws Exception { + // Skip test if the kql function is not enabled. 
+ assumeTrue("kql function capability not available", EsqlCapabilities.Cap.KQL_FUNCTION.isEnabled()); + + assertEquals("1:9: [KQL] function is only supported in WHERE commands", error("row a = kql(\"Anna\")")); + checkFullTextFunctionsOnlyAllowedInWhere("KQL", "kql(\"Anna\")", "function"); + } + public void testMatchFunctionOnlyAllowedInWhere() throws Exception { checkFullTextFunctionsOnlyAllowedInWhere("MATCH", "match(first_name, \"Anna\")", "function"); } public void testMatchOperatornOnlyAllowedInWhere() throws Exception { - assumeTrue("skipping because MATCH operator is not enabled", EsqlCapabilities.Cap.MATCH_OPERATOR_COLON.isEnabled()); checkFullTextFunctionsOnlyAllowedInWhere(":", "first_name:\"Anna\"", "operator"); } @@ -1304,17 +1374,34 @@ public void testQueryStringFunctionArgNotNullOrConstant() throws Exception { // Other value types are tested in QueryStringFunctionTests } + public void testKqlFunctionArgNotNullOrConstant() throws Exception { + // Skip test if the kql function is not enabled. + assumeTrue("kql function capability not available", EsqlCapabilities.Cap.KQL_FUNCTION.isEnabled()); + + assertEquals( + "1:19: argument of [kql(first_name)] must be a constant, received [first_name]", + error("from test | where kql(first_name)") + ); + assertEquals("1:19: argument of [kql(null)] cannot be null, received [null]", error("from test | where kql(null)")); + // Other value types are tested in KqlFunctionTests + } + public void testQueryStringWithDisjunctions() { checkWithDisjunctions("QSTR", "qstr(\"first_name: Anna\")", "function"); } + public void testKqlFunctionWithDisjunctions() { + // Skip test if the kql function is not enabled. + assumeTrue("kql function capability not available", EsqlCapabilities.Cap.KQL_FUNCTION.isEnabled()); + + checkWithDisjunctions("KQL", "kql(\"first_name: Anna\")", "function"); + } + public void testMatchFunctionWithDisjunctions() { checkWithDisjunctions("MATCH", "match(first_name, \"Anna\")", "function"); } public void testMatchOperatorWithDisjunctions() { - assumeTrue("skipping because MATCH operator is not enabled", EsqlCapabilities.Cap.MATCH_OPERATOR_COLON.isEnabled()); - checkWithDisjunctions(":", "first_name : \"Anna\"", "operator"); } @@ -1365,12 +1452,18 @@ public void testQueryStringFunctionWithNonBooleanFunctions() { checkFullTextFunctionsWithNonBooleanFunctions("QSTR", "qstr(\"first_name: Anna\")", "function"); } + public void testKqlFunctionWithNonBooleanFunctions() { + // Skip test if the kql function is not enabled. 
+ assumeTrue("kql function capability not available", EsqlCapabilities.Cap.KQL_FUNCTION.isEnabled()); + + checkFullTextFunctionsWithNonBooleanFunctions("KQL", "kql(\"first_name: Anna\")", "function"); + } + public void testMatchFunctionWithNonBooleanFunctions() { checkFullTextFunctionsWithNonBooleanFunctions("MATCH", "match(first_name, \"Anna\")", "function"); } public void testMatchOperatorWithNonBooleanFunctions() { - assumeTrue("skipping because MATCH operator is not enabled", EsqlCapabilities.Cap.MATCH_OPERATOR_COLON.isEnabled()); checkFullTextFunctionsWithNonBooleanFunctions(":", "first_name:\"Anna\"", "operator"); } @@ -1448,8 +1541,6 @@ public void testMatchFunctionCurrentlyUnsupportedBehaviour() throws Exception { "1:68: Unknown column [first_name]", error("from test | stats max_salary = max(salary) by emp_no | where match(first_name, \"Anna\")") ); - - assumeTrue("skipping because MATCH operator is not enabled", EsqlCapabilities.Cap.MATCH_OPERATOR_COLON.isEnabled()); assertEquals( "1:62: Unknown column [first_name]", error("from test | stats max_salary = max(salary) by emp_no | where first_name : \"Anna\"") @@ -1469,8 +1560,6 @@ public void testMatchFunctionNullArgs() throws Exception { public void testMatchTargetsExistingField() throws Exception { assertEquals("1:39: Unknown column [first_name]", error("from test | keep emp_no | where match(first_name, \"Anna\")")); - - assumeTrue("skipping because MATCH operator is not enabled", EsqlCapabilities.Cap.MATCH_OPERATOR_COLON.isEnabled()); assertEquals("1:33: Unknown column [first_name]", error("from test | keep emp_no | where first_name : \"Anna\"")); } @@ -1667,6 +1756,170 @@ public void testToDatePeriodToTimeDurationWithInvalidType() { ); } + public void testNonMetadataScore() { + assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); + assertEquals("1:12: `_score` is a reserved METADATA attribute", error("from foo | eval _score = 10")); + + assertEquals( + "1:48: `_score` is a reserved METADATA attribute", + error("from foo metadata _score | where qstr(\"bar\") | eval _score = _score + 1") + ); + } + + public void testScoreRenaming() { + assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); + assertEquals("1:33: `_score` is a reserved METADATA attribute", error("from foo METADATA _id, _score | rename _id as _score")); + + assertTrue(passes("from foo metadata _score | rename _score as foo").stream().anyMatch(a -> a.name().equals("foo"))); + } + + private List passes(String query) { + LogicalPlan logicalPlan = defaultAnalyzer.analyze(parser.createStatement(query)); + assertTrue(logicalPlan.resolved()); + return logicalPlan.output(); + } + + public void testIntervalAsString() { + // DateTrunc + for (String interval : List.of("1 minu", "1 dy", "1.5 minutes", "0.5 days", "minutes 1", "day 5")) { + assertThat( + error("from types | EVAL x = date_trunc(\"" + interval + "\", \"1991-06-26T00:00:00.000Z\")"), + containsString("1:35: Cannot convert string [" + interval + "] to [DATE_PERIOD or TIME_DURATION]") + ); + assertThat( + error("from types | EVAL x = \"1991-06-26T00:00:00.000Z\", y = date_trunc(\"" + interval + "\", x::datetime)"), + containsString("1:67: Cannot convert string [" + interval + "] to [DATE_PERIOD or TIME_DURATION]") + ); + } + for (String interval : List.of("1", "0.5", "invalid")) { + assertThat( + error("from types | EVAL x = date_trunc(\"" + interval + "\", \"1991-06-26T00:00:00.000Z\")"), + containsString( + "1:24: first argument of 
[date_trunc(\"" + + interval + + "\", \"1991-06-26T00:00:00.000Z\")] must be [dateperiod or timeduration], found value [\"" + + interval + + "\"] type [keyword]" + ) + ); + assertThat( + error("from types | EVAL x = \"1991-06-26T00:00:00.000Z\", y = date_trunc(\"" + interval + "\", x::datetime)"), + containsString( + "1:56: first argument of [date_trunc(\"" + + interval + + "\", x::datetime)] " + + "must be [dateperiod or timeduration], found value [\"" + + interval + + "\"] type [keyword]" + ) + ); + } + + // Bucket + assertEquals( + "1:52: Cannot convert string [1 yar] to [DATE_PERIOD or TIME_DURATION], error [Unexpected temporal unit: 'yar']", + error("from test | stats max(emp_no) by bucket(hire_date, \"1 yar\")") + ); + assertEquals( + "1:52: Cannot convert string [1 hur] to [DATE_PERIOD or TIME_DURATION], error [Unexpected temporal unit: 'hur']", + error("from test | stats max(emp_no) by bucket(hire_date, \"1 hur\")") + ); + assertEquals( + "1:58: Cannot convert string [1 mu] to [DATE_PERIOD or TIME_DURATION], error [Unexpected temporal unit: 'mu']", + error("from test | stats max = max(emp_no) by bucket(hire_date, \"1 mu\") | sort max ") + ); + assertEquals( + "1:34: second argument of [bucket(hire_date, \"1\")] must be [integral, date_period or time_duration], " + + "found value [\"1\"] type [keyword]", + error("from test | stats max(emp_no) by bucket(hire_date, \"1\")") + ); + assertEquals( + "1:40: second argument of [bucket(hire_date, \"1\")] must be [integral, date_period or time_duration], " + + "found value [\"1\"] type [keyword]", + error("from test | stats max = max(emp_no) by bucket(hire_date, \"1\") | sort max ") + ); + assertEquals( + "1:68: second argument of [bucket(y, \"1\")] must be [integral, date_period or time_duration], " + + "found value [\"1\"] type [keyword]", + error("from test | eval x = emp_no, y = hire_date | stats max = max(x) by bucket(y, \"1\") | sort max ") + ); + } + + public void testCategorizeSingleGrouping() { + assumeTrue("requires Categorize capability", EsqlCapabilities.Cap.CATEGORIZE_V4.isEnabled()); + + query("from test | STATS COUNT(*) BY CATEGORIZE(first_name)"); + query("from test | STATS COUNT(*) BY cat = CATEGORIZE(first_name)"); + + assertEquals( + "1:31: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings", + error("from test | STATS COUNT(*) BY CATEGORIZE(first_name), emp_no") + ); + assertEquals( + "1:39: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings", + error("FROM test | STATS COUNT(*) BY emp_no, CATEGORIZE(first_name)") + ); + assertEquals( + "1:35: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings", + error("FROM test | STATS COUNT(*) BY a = CATEGORIZE(first_name), b = emp_no") + ); + assertEquals( + "1:31: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings\n" + + "line 1:55: cannot use CATEGORIZE grouping function [CATEGORIZE(last_name)] with multiple groupings", + error("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), CATEGORIZE(last_name)") + ); + assertEquals( + "1:31: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] with multiple groupings", + error("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name), CATEGORIZE(first_name)") + ); + } + + public void testCategorizeNestedGrouping() { + assumeTrue("requires Categorize capability", EsqlCapabilities.Cap.CATEGORIZE_V4.isEnabled()); + + query("from test | STATS COUNT(*) BY 
CATEGORIZE(LENGTH(first_name)::string)"); + + assertEquals( + "1:40: CATEGORIZE grouping function [CATEGORIZE(first_name)] can't be used within other expressions", + error("FROM test | STATS COUNT(*) BY MV_COUNT(CATEGORIZE(first_name))") + ); + assertEquals( + "1:31: CATEGORIZE grouping function [CATEGORIZE(first_name)] can't be used within other expressions", + error("FROM test | STATS COUNT(*) BY CATEGORIZE(first_name)::datetime") + ); + } + + public void testCategorizeWithinAggregations() { + assumeTrue("requires Categorize capability", EsqlCapabilities.Cap.CATEGORIZE_V4.isEnabled()); + + query("from test | STATS MV_COUNT(cat), COUNT(*) BY cat = CATEGORIZE(first_name)"); + + assertEquals( + "1:25: cannot use CATEGORIZE grouping function [CATEGORIZE(first_name)] within the aggregations", + error("FROM test | STATS COUNT(CATEGORIZE(first_name)) BY CATEGORIZE(first_name)") + ); + + assertEquals( + "1:25: cannot reference CATEGORIZE grouping function [cat] within the aggregations", + error("FROM test | STATS COUNT(cat) BY cat = CATEGORIZE(first_name)") + ); + assertEquals( + "1:30: cannot reference CATEGORIZE grouping function [cat] within the aggregations", + error("FROM test | STATS SUM(LENGTH(cat::keyword) + LENGTH(last_name)) BY cat = CATEGORIZE(first_name)") + ); + assertEquals( + "1:25: cannot reference CATEGORIZE grouping function [`CATEGORIZE(first_name)`] within the aggregations", + error("FROM test | STATS COUNT(`CATEGORIZE(first_name)`) BY CATEGORIZE(first_name)") + ); + } + + public void testSortByAggregate() { + assertEquals("1:18: Aggregate functions are not allowed in SORT [COUNT]", error("ROW a = 1 | SORT count(*)")); + assertEquals("1:28: Aggregate functions are not allowed in SORT [COUNT]", error("ROW a = 1 | SORT to_string(count(*))")); + assertEquals("1:22: Aggregate functions are not allowed in SORT [MAX]", error("ROW a = 1 | SORT 1 + max(a)")); + assertEquals("1:18: Aggregate functions are not allowed in SORT [COUNT]", error("FROM test | SORT count(*)")); + } + private void query(String query) { defaultAnalyzer.analyze(parser.createStatement(query)); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java index a2aa447c748e9..6dd0c5fe88afd 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java @@ -8,20 +8,11 @@ package org.elasticsearch.xpack.esql.expression; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Node; import org.elasticsearch.xpack.esql.expression.function.ReferenceAttributeTests; -import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; -import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; -import org.elasticsearch.xpack.esql.expression.function.fulltext.FullTextFunction; -import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.plan.AbstractNodeSerializationTests; -import java.util.ArrayList; -import 
java.util.List; - public abstract class AbstractExpressionSerializationTests extends AbstractNodeSerializationTests { public static Expression randomChild() { return ReferenceAttributeTests.randomReferenceAttribute(false); @@ -29,17 +20,7 @@ public static Expression randomChild() { @Override protected final NamedWriteableRegistry getNamedWriteableRegistry() { - List entries = new ArrayList<>(NamedExpression.getNamedWriteables()); - entries.addAll(Expression.getNamedWriteables()); - entries.addAll(Attribute.getNamedWriteables()); - entries.addAll(EsqlScalarFunction.getNamedWriteables()); - entries.addAll(AggregateFunction.getNamedWriteables()); - entries.addAll(FullTextFunction.getNamedWriteables()); - entries.add(UnsupportedAttribute.ENTRY); - entries.add(UnsupportedAttribute.NAMED_EXPRESSION_ENTRY); - entries.add(UnsupportedAttribute.EXPRESSION_ENTRY); - entries.add(org.elasticsearch.xpack.esql.expression.Order.ENTRY); - return new NamedWriteableRegistry(entries); + return new NamedWriteableRegistry(ExpressionWritables.getNamedWriteables()); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java index ccbed01994bf7..7bb8ab3e147e2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java @@ -11,23 +11,18 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.test.AbstractWireTestCase; import org.elasticsearch.xpack.esql.core.expression.Alias; -import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.tree.SourceTests; import org.elasticsearch.xpack.esql.expression.function.ReferenceAttributeTests; -import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.in; public class AliasTests extends AbstractWireTestCase { public static Alias randomAlias() { @@ -76,10 +71,6 @@ protected Alias copyInstance(Alias instance, TransportVersion version) throws IO @Override protected final NamedWriteableRegistry getNamedWriteableRegistry() { - List entries = new ArrayList<>(NamedExpression.getNamedWriteables()); - entries.addAll(Attribute.getNamedWriteables()); - entries.add(UnsupportedAttribute.ENTRY); - entries.addAll(Expression.getNamedWriteables()); - return new NamedWriteableRegistry(entries); + return new NamedWriteableRegistry(ExpressionWritables.allExpressions()); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java index db5d8e03458ea..df1675ba22568 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java @@ -111,7 +111,8 @@ protected static List withNoRowsExpectingNull(List entries = new ArrayList<>(Attribute.getNamedWriteables()); - entries.add(UnsupportedAttribute.ENTRY); - return new NamedWriteableRegistry(entries); + return new NamedWriteableRegistry(ExpressionWritables.attributes()); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 6a552f400d36e..da92eba1e4a05 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -44,6 +44,7 @@ import org.elasticsearch.xpack.esql.core.util.NumericUtils; import org.elasticsearch.xpack.esql.core.util.StringUtils; import org.elasticsearch.xpack.esql.evaluator.EvalMapper; +import org.elasticsearch.xpack.esql.expression.function.fulltext.Match; import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Greatest; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import org.elasticsearch.xpack.esql.expression.function.scalar.string.RLike; @@ -130,7 +131,9 @@ public abstract class AbstractFunctionTestCase extends ESTestCase { entry("mod", Mod.class), entry("neg", Neg.class), entry("is_null", IsNull.class), - entry("is_not_null", IsNotNull.class) + entry("is_not_null", IsNotNull.class), + // Match operator is both a function and an operator + entry("match_operator", Match.class) ); private static EsqlFunctionRegistry functionRegistry = new EsqlFunctionRegistry().snapshotRegistry(); @@ -226,7 +229,8 @@ protected static List anyNullIsNull( oc.getExpectedTypeError(), null, null, - null + null, + oc.canBuildEvaluator() ); })); @@ -257,7 +261,8 @@ protected static List anyNullIsNull( oc.getExpectedTypeError(), null, null, - null + null, + oc.canBuildEvaluator() ); })); } @@ -645,18 +650,7 @@ protected static List randomizeBytesRefsOffset(List ":"; + default -> null; + }; + } + /** * If this tests is for a unary operator return its symbol, otherwise return {@code null}. * This is functionally the reverse of {@link ExpressionBuilder#visitArithmeticUnary}. 
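A note on the "match_operator" entry above: Match is registered both as a function and as the ':' operator, so the docs/test plumbing needs a registered-name-to-symbol mapping; the switch whose tail appears above provides it, though its enclosing signature is swallowed by the hunk. A hedged restatement as a standalone method (the method name is a guess, only the switch body is visible above):

static String searchOperatorSymbol(String registeredName) {
    return switch (registeredName) {
        case "match_operator" -> ":"; // Match doubles as the ':' search operator
        default -> null;              // not a search operator
    };
}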
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java index 65e8a53fc05c5..9d4687568ca9d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java @@ -345,6 +345,7 @@ public final void testFold() { return; } assertFalse("expected resolved", expression.typeResolved().unresolved()); + assumeTrue("Can't build evaluator", testCase.canBuildEvaluator()); Expression nullOptimized = new FoldNull().rule(expression); assertThat(nullOptimized.dataType(), equalTo(testCase.expectedType())); assertTrue(nullOptimized.foldable()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/CheckLicenseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/CheckLicenseTests.java new file mode 100644 index 0000000000000..98f36d339976c --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/CheckLicenseTests.java @@ -0,0 +1,138 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function; + +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicensedFeature; +import org.elasticsearch.license.TestUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.license.internal.XPackLicenseStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.EsqlTestUtils; +import org.elasticsearch.xpack.esql.VerificationException; +import org.elasticsearch.xpack.esql.analysis.Analyzer; +import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; +import org.elasticsearch.xpack.esql.analysis.Verifier; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.function.Function; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.parser.EsqlParser; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.stats.Metrics; + +import java.util.List; + +import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.analyzerDefaultMapping; +import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.defaultEnrichResolution; +import static org.hamcrest.Matchers.containsString; + +public class CheckLicenseTests extends ESTestCase { + + private final EsqlParser parser = new EsqlParser(); + private final String esql = "from tests | eval license() | LIMIT 10"; + + public void testLicense() { + for (License.OperationMode functionLicense : License.OperationMode.values()) { + final LicensedFeature functionLicenseFeature = random().nextBoolean() + ? 
LicensedFeature.momentary("test", "license", functionLicense) + : LicensedFeature.persistent("test", "license", functionLicense); + final EsqlFunctionRegistry.FunctionBuilder builder = (source, expression, cfg) -> { + final LicensedFunction licensedFunction = new LicensedFunction(source); + licensedFunction.setLicensedFeature(functionLicenseFeature); + return licensedFunction; + }; + for (License.OperationMode operationMode : License.OperationMode.values()) { + if (License.OperationMode.TRIAL != operationMode && License.OperationMode.compare(operationMode, functionLicense) < 0) { + // non-compliant license + final VerificationException ex = expectThrows(VerificationException.class, () -> analyze(builder, operationMode)); + assertThat(ex.getMessage(), containsString("current license is non-compliant for function [license()]")); + } else { + // compliant license + assertNotNull(analyze(builder, operationMode)); + } + } + } + } + + private LogicalPlan analyze(EsqlFunctionRegistry.FunctionBuilder builder, License.OperationMode operationMode) { + final FunctionDefinition def = EsqlFunctionRegistry.def(LicensedFunction.class, builder, "license"); + final EsqlFunctionRegistry registry = new EsqlFunctionRegistry(def) { + @Override + public EsqlFunctionRegistry snapshotRegistry() { + return this; + } + }; + return analyzer(registry, operationMode).analyze(parser.createStatement(esql)); + } + + private static Analyzer analyzer(EsqlFunctionRegistry registry, License.OperationMode operationMode) { + return new Analyzer( + new AnalyzerContext(EsqlTestUtils.TEST_CFG, registry, analyzerDefaultMapping(), defaultEnrichResolution()), + new Verifier(new Metrics(new EsqlFunctionRegistry()), getLicenseState(operationMode)) + ); + } + + private static XPackLicenseState getLicenseState(License.OperationMode operationMode) { + final TestUtils.UpdatableLicenseState licenseState = new TestUtils.UpdatableLicenseState(); + licenseState.update(new XPackLicenseStatus(operationMode, true, null)); + return licenseState; + } + + // It needs to be public because we run validation on it via reflection in org.elasticsearch.xpack.esql.tree.EsqlNodeSubclassTests. + // That test also prevents adding the license as a constructor parameter.
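+ // (For reference, the compliance rule exercised by testLicense reduces to:
+ // compliant == (mode == TRIAL || OperationMode.compare(mode, required) >= 0);
+ // the class below is the minimal Function needed to carry a license requirement.)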
+ public static class LicensedFunction extends Function { + + private LicensedFeature licensedFeature; + + public LicensedFunction(Source source) { + super(source, List.of()); + } + + void setLicensedFeature(LicensedFeature licensedFeature) { + this.licensedFeature = licensedFeature; + } + + @Override + public boolean checkLicense(XPackLicenseState state) { + if (licensedFeature instanceof LicensedFeature.Momentary momentary) { + return momentary.check(state); + } else { + return licensedFeature.checkWithoutTracking(state); + } + } + + @Override + public DataType dataType() { + return DataType.KEYWORD; + } + + @Override + public Expression replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this); + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + + @Override + public void writeTo(StreamOutput out) { + throw new UnsupportedOperationException(); + } + } + +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java index 7fe67707a7976..775ca45bfa124 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java @@ -262,6 +262,42 @@ public static List dateCases(int minRows, int maxRows) { return cases; } + /** + * + * Generate cases for {@link DataType#DATE_NANOS}. + * + */ + public static List dateNanosCases(int minRows, int maxRows) { + List cases = new ArrayList<>(); + addSuppliers(cases, minRows, maxRows, "<1970-01-01T00:00:00.000000000Z>", DataType.DATE_NANOS, () -> 0L); + addSuppliers( + cases, + minRows, + maxRows, + "", + DataType.DATE_NANOS, + () -> ESTestCase.randomLongBetween(0, 10 * (long) 10e11) + ); + addSuppliers( + cases, + minRows, + maxRows, + "", + DataType.DATE_NANOS, + () -> ESTestCase.randomLongBetween(10 * (long) 10e11, Long.MAX_VALUE) + ); + addSuppliers( + cases, + minRows, + maxRows, + "", + DataType.DATE_NANOS, + () -> ESTestCase.randomLongBetween(Long.MAX_VALUE / 100 * 99, Long.MAX_VALUE) + ); + + return cases; + } + public static List booleanCases(int minRows, int maxRows) { List cases = new ArrayList<>(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java index df0737feadd8d..43e2ededeff0e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java @@ -89,6 +89,18 @@ static String binaryOperator(String operator) throws IOException { return toSvg(new Sequence(expressions.toArray(Expression[]::new))); } + /** + * Generate a railroad diagram for a search operator. The output would look like + * {@code field : value}. 
+ */ + static String searchOperator(String operator) throws IOException { + List expressions = new ArrayList<>(); + expressions.add(new Literal("field")); + expressions.add(new Syntax(operator)); + expressions.add(new Literal("query")); + return toSvg(new Sequence(expressions.toArray(Expression[]::new))); + } + /** * Generate a railroad diagram for unary operator. The output would look like * {@code -v}. diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java index c12e0a8684ba9..d78dfd3141a04 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java @@ -1431,6 +1431,34 @@ public static TestCase typeError(List data, String expectedTypeError) Class foldingExceptionClass, String foldingExceptionMessage, Object extra + ) { + this( + data, + evaluatorToString, + expectedType, + matcher, + expectedWarnings, + expectedBuildEvaluatorWarnings, + expectedTypeError, + foldingExceptionClass, + foldingExceptionMessage, + extra, + data.stream().allMatch(d -> d.forceLiteral || DataType.isRepresentable(d.type)) + ); + } + + TestCase( + List data, + Matcher evaluatorToString, + DataType expectedType, + Matcher matcher, + String[] expectedWarnings, + String[] expectedBuildEvaluatorWarnings, + String expectedTypeError, + Class foldingExceptionClass, + String foldingExceptionMessage, + Object extra, + boolean canBuildEvaluator ) { this.source = Source.EMPTY; this.data = data; @@ -1442,10 +1470,10 @@ public static TestCase typeError(List data, String expectedTypeError) this.expectedWarnings = expectedWarnings; this.expectedBuildEvaluatorWarnings = expectedBuildEvaluatorWarnings; this.expectedTypeError = expectedTypeError; - this.canBuildEvaluator = data.stream().allMatch(d -> d.forceLiteral || DataType.isRepresentable(d.type)); this.foldingExceptionClass = foldingExceptionClass; this.foldingExceptionMessage = foldingExceptionMessage; this.extra = extra; + this.canBuildEvaluator = canBuildEvaluator; } public Source getSource() { @@ -1520,6 +1548,25 @@ public Object extra() { return extra; } + /** + * Build a new {@link TestCase} with new {@link #data}. + */ + public TestCase withData(List data) { + return new TestCase( + data, + evaluatorToString, + expectedType, + matcher, + expectedWarnings, + expectedBuildEvaluatorWarnings, + expectedTypeError, + foldingExceptionClass, + foldingExceptionMessage, + extra, + canBuildEvaluator + ); + } + /** * Build a new {@link TestCase} with new {@link #extra()}. */ @@ -1534,7 +1581,8 @@ public TestCase withExtra(Object extra) { expectedTypeError, foldingExceptionClass, foldingExceptionMessage, - extra + extra, + canBuildEvaluator ); } @@ -1549,7 +1597,8 @@ public TestCase withWarning(String warning) { expectedTypeError, foldingExceptionClass, foldingExceptionMessage, - extra + extra, + canBuildEvaluator ); } @@ -1568,7 +1617,8 @@ public TestCase withBuildEvaluatorWarning(String warning) { expectedTypeError, foldingExceptionClass, foldingExceptionMessage, - extra + extra, + canBuildEvaluator ); } @@ -1592,7 +1642,30 @@ public TestCase withFoldingException(Class clazz, String me expectedTypeError, clazz, message, - extra + extra, + canBuildEvaluator + ); + } + + /** + * Build a new {@link TestCase} that can't build an evaluator. + *
+     * <p>
+     *     Useful for special cases that can't be executed, but should still be considered.
+     * </p>
    + */ + public TestCase withoutEvaluator() { + return new TestCase( + data, + evaluatorToString, + expectedType, + matcher, + expectedWarnings, + expectedBuildEvaluatorWarnings, + expectedTypeError, + foldingExceptionClass, + foldingExceptionMessage, + extra, + false ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinctTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinctTests.java index 5e23083d7c810..e0b8c1356d087 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinctTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinctTests.java @@ -52,11 +52,13 @@ public static Iterable parameters() { MultiRowTestCaseSupplier.longCases(1, 1000, Long.MIN_VALUE, Long.MAX_VALUE, true), MultiRowTestCaseSupplier.doubleCases(1, 1000, -Double.MAX_VALUE, Double.MAX_VALUE, true), MultiRowTestCaseSupplier.dateCases(1, 1000), + MultiRowTestCaseSupplier.dateNanosCases(1, 1000), MultiRowTestCaseSupplier.booleanCases(1, 1000), MultiRowTestCaseSupplier.ipCases(1, 1000), MultiRowTestCaseSupplier.versionCases(1, 1000), MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.KEYWORD), - MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.TEXT) + MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.TEXT), + MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.SEMANTIC_TEXT) ).flatMap(List::stream).forEach(fieldCaseSupplier -> { // With precision for (var precisionCaseSupplier : precisionSuppliers) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountTests.java index 979048534edbf..131072acff870 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountTests.java @@ -47,7 +47,8 @@ public static Iterable parameters() { MultiRowTestCaseSupplier.geoPointCases(1, 1000, true), MultiRowTestCaseSupplier.cartesianPointCases(1, 1000, true), MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.KEYWORD), - MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.TEXT) + MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.TEXT), + MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.SEMANTIC_TEXT) ).flatMap(List::stream).map(CountTests::makeSupplier).collect(Collectors.toCollection(() -> suppliers)); // No rows diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java index 9756804a1ec0f..ae5b3691b0a7d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java @@ -48,7 +48,8 @@ public static Iterable parameters() { MultiRowTestCaseSupplier.ipCases(1, 1000), MultiRowTestCaseSupplier.versionCases(1, 1000), MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.KEYWORD), - MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.TEXT) + MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.TEXT), + 
MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.SEMANTIC_TEXT) ).flatMap(List::stream).map(MaxTests::makeSupplier).collect(Collectors.toCollection(() -> suppliers)); suppliers.addAll( @@ -90,6 +91,15 @@ public static Iterable parameters() { equalTo(200L) ) ), + new TestCaseSupplier( + List.of(DataType.DATE_NANOS), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(200L), DataType.DATE_NANOS, "field")), + "Max[field=Attribute[channel=0]]", + DataType.DATE_NANOS, + equalTo(200L) + ) + ), new TestCaseSupplier( List.of(DataType.BOOLEAN), () -> new TestCaseSupplier.TestCase( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java index 171181496c889..ad2953f057635 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java @@ -48,7 +48,8 @@ public static Iterable parameters() { MultiRowTestCaseSupplier.ipCases(1, 1000), MultiRowTestCaseSupplier.versionCases(1, 1000), MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.KEYWORD), - MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.TEXT) + MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.TEXT), + MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.SEMANTIC_TEXT) ).flatMap(List::stream).map(MinTests::makeSupplier).collect(Collectors.toCollection(() -> suppliers)); suppliers.addAll( @@ -90,6 +91,15 @@ public static Iterable parameters() { equalTo(200L) ) ), + new TestCaseSupplier( + List.of(DataType.DATE_NANOS), + () -> new TestCaseSupplier.TestCase( + List.of(TestCaseSupplier.TypedData.multiRow(List.of(200L), DataType.DATE_NANOS, "field")), + "Min[field=Attribute[channel=0]]", + DataType.DATE_NANOS, + equalTo(200L) + ) + ), new TestCaseSupplier( List.of(DataType.BOOLEAN), () -> new TestCaseSupplier.TestCase( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/StdDevTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/StdDevTests.java new file mode 100644 index 0000000000000..85b96e29d1f6a --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/StdDevTests.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.compute.aggregation.WelfordAlgorithm; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.AbstractAggregationTestCase; +import org.elasticsearch.xpack.esql.expression.function.MultiRowTestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; + +public class StdDevTests extends AbstractAggregationTestCase { + public StdDevTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + var suppliers = new ArrayList(); + + Stream.of( + MultiRowTestCaseSupplier.intCases(1, 1000, Integer.MIN_VALUE, Integer.MAX_VALUE, true), + MultiRowTestCaseSupplier.longCases(1, 1000, Long.MIN_VALUE, Long.MAX_VALUE, true), + MultiRowTestCaseSupplier.doubleCases(1, 1000, -Double.MAX_VALUE, Double.MAX_VALUE, true) + ).flatMap(List::stream).map(StdDevTests::makeSupplier).collect(Collectors.toCollection(() -> suppliers)); + + return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers); + } + + @Override + protected Expression build(Source source, List args) { + return new StdDev(source, args.get(0)); + } + + private static TestCaseSupplier makeSupplier(TestCaseSupplier.TypedDataSupplier fieldSupplier) { + return new TestCaseSupplier(List.of(fieldSupplier.type()), () -> { + var fieldTypedData = fieldSupplier.get(); + var fieldValues = fieldTypedData.multiRowData(); + + WelfordAlgorithm welfordAlgorithm = new WelfordAlgorithm(); + + for (var fieldValue : fieldValues) { + var value = ((Number) fieldValue).doubleValue(); + welfordAlgorithm.add(value); + } + var result = welfordAlgorithm.evaluate(); + var expected = Double.isInfinite(result) ? 
null : result; + return new TestCaseSupplier.TestCase( + List.of(fieldTypedData), + "StdDev[field=Attribute[channel=0]]", + DataType.DOUBLE, + equalTo(expected) + ); + }); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopTests.java index f7bf338caa099..f236e4d8faf98 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/TopTests.java @@ -48,7 +48,8 @@ public static Iterable parameters() { MultiRowTestCaseSupplier.booleanCases(1, 1000), MultiRowTestCaseSupplier.ipCases(1, 1000), MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.KEYWORD), - MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.TEXT) + MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.TEXT), + MultiRowTestCaseSupplier.stringCases(1, 1000, DataType.SEMANTIC_TEXT) ) .flatMap(List::stream) .map(fieldCaseSupplier -> TopTests.makeSupplier(fieldCaseSupplier, limitCaseSupplier, order)) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ValuesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ValuesTests.java index 55320543d0ec3..5f35f8cada397 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ValuesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ValuesTests.java @@ -45,12 +45,14 @@ public static Iterable parameters() { MultiRowTestCaseSupplier.longCases(1, 1000, Long.MIN_VALUE, Long.MAX_VALUE, true), MultiRowTestCaseSupplier.doubleCases(1, 1000, -Double.MAX_VALUE, Double.MAX_VALUE, true), MultiRowTestCaseSupplier.dateCases(1, 1000), + MultiRowTestCaseSupplier.dateNanosCases(1, 1000), MultiRowTestCaseSupplier.booleanCases(1, 1000), MultiRowTestCaseSupplier.ipCases(1, 1000), MultiRowTestCaseSupplier.versionCases(1, 1000), // Lower values for strings, as they take more space and may trigger the circuit breaker MultiRowTestCaseSupplier.stringCases(1, 20, DataType.KEYWORD), - MultiRowTestCaseSupplier.stringCases(1, 20, DataType.TEXT) + MultiRowTestCaseSupplier.stringCases(1, 20, DataType.TEXT), + MultiRowTestCaseSupplier.stringCases(1, 20, DataType.SEMANTIC_TEXT) ).flatMap(List::stream).map(ValuesTests::makeSupplier).collect(Collectors.toCollection(() -> suppliers)); return parameterSuppliersFromTypedDataWithDefaultChecks( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/KqlTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/KqlTests.java new file mode 100644 index 0000000000000..d97be6b169eef --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/KqlTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.fulltext; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.xpack.esql.action.EsqlCapabilities; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.junit.BeforeClass; + +import java.util.List; +import java.util.function.Supplier; + +public class KqlTests extends NoneFieldFullTextFunctionTestCase { + @BeforeClass + protected static void ensureKqlFunctionEnabled() { + assumeTrue("kql function capability not available", EsqlCapabilities.Cap.KQL_FUNCTION.isEnabled()); + } + + public KqlTests(@Name("TestCase") Supplier testCaseSupplier) { + super(testCaseSupplier); + } + + @ParametersFactory + public static Iterable parameters() { + return generateParameters(); + } + + @Override + protected Expression build(Source source, List args) { + return new Kql(source, args.get(0)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/MatchOperatorTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/MatchOperatorTests.java new file mode 100644 index 0000000000000..32e9670286ef7 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/MatchOperatorTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.fulltext;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.xpack.esql.core.type.DataType;
+import org.elasticsearch.xpack.esql.expression.function.FunctionName;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.function.Supplier;
+
+/**
+ * This class is only used to generate docs for the match operator - all testing is done in {@link MatchTests}
+ */
+@FunctionName("match_operator")
+public class MatchOperatorTests extends MatchTests {
+
+    public MatchOperatorTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
+        super(testCaseSupplier);
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() {
+        // Have a minimal test so that we can generate the appropriate types in the docs
+        List<TestCaseSupplier> suppliers = new LinkedList<>();
+        addPositiveTestCase(List.of(DataType.KEYWORD, DataType.KEYWORD), suppliers);
+        addPositiveTestCase(List.of(DataType.TEXT, DataType.TEXT), suppliers);
+        addPositiveTestCase(List.of(DataType.KEYWORD, DataType.TEXT), suppliers);
+        addPositiveTestCase(List.of(DataType.TEXT, DataType.KEYWORD), suppliers);
+        return parameterSuppliersFromTypedData(suppliers);
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/MatchTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/MatchTests.java
index 6d0c45a972299..6a4a7404135f9 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/MatchTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/MatchTests.java
@@ -36,19 +36,11 @@ public MatchTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCase
 
     @ParametersFactory
     public static Iterable<Object[]> parameters() {
-        Set<DataType> supportedTextParams = Set.of(DataType.KEYWORD, DataType.TEXT);
-        Set<DataType> supportedNumericParams = Set.of(DataType.DOUBLE, DataType.INTEGER);
-        Set<DataType> supportedFuzzinessParams = Set.of(DataType.INTEGER, DataType.KEYWORD, DataType.TEXT);
-        List<Set<DataType>> supportedPerPosition = List.of(
-            supportedTextParams,
-            supportedTextParams,
-            supportedNumericParams,
-            supportedFuzzinessParams
-        );
+        List<Set<DataType>> supportedPerPosition = supportedParams();
         List<TestCaseSupplier> suppliers = new LinkedList<>();
         for (DataType fieldType : DataType.stringTypes()) {
             for (DataType queryType : DataType.stringTypes()) {
-                addPositiveTestCase(List.of(fieldType, queryType), supportedPerPosition, suppliers);
+                addPositiveTestCase(List.of(fieldType, queryType), suppliers);
                 addNonFieldTestCase(List.of(fieldType, queryType), supportedPerPosition, suppliers);
             }
         }
@@ -61,11 +53,20 @@ public static Iterable<Object[]> parameters() {
         );
     }
 
-    private static void addPositiveTestCase(
-        List<DataType> paramDataTypes,
-        List<Set<DataType>> supportedPerPosition,
-        List<TestCaseSupplier> suppliers
-    ) {
+    protected static List<Set<DataType>> supportedParams() {
+        Set<DataType> supportedTextParams = Set.of(DataType.KEYWORD, DataType.TEXT);
+        Set<DataType> supportedNumericParams = Set.of(DataType.DOUBLE, DataType.INTEGER);
+        Set<DataType> supportedFuzzinessParams = Set.of(DataType.INTEGER, DataType.KEYWORD, DataType.TEXT);
+        List<Set<DataType>> supportedPerPosition = List.of(
+            supportedTextParams,
+            supportedTextParams,
+            supportedNumericParams,
+            supportedFuzzinessParams
+        );
+        return supportedPerPosition;
+    }
+
+    protected static void addPositiveTestCase(List<DataType> paramDataTypes, List<TestCaseSupplier> suppliers) {
+        //
Positive case - creates an ES field from the field parameter type suppliers.add( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/NoneFieldFullTextFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/NoneFieldFullTextFunctionTestCase.java new file mode 100644 index 0000000000000..383cb8671053d --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/NoneFieldFullTextFunctionTestCase.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.fulltext; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.hamcrest.Matcher; + +import java.util.LinkedList; +import java.util.List; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; + +public abstract class NoneFieldFullTextFunctionTestCase extends AbstractFunctionTestCase { + + public NoneFieldFullTextFunctionTestCase(Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + public final void testFold() { + Expression expression = buildLiteralExpression(testCase); + if (testCase.getExpectedTypeError() != null) { + assertTypeResolutionFailure(expression); + return; + } + assertFalse("expected resolved", expression.typeResolved().unresolved()); + } + + protected static Iterable generateParameters() { + List suppliers = new LinkedList<>(); + for (DataType strType : DataType.stringTypes()) { + suppliers.add( + new TestCaseSupplier( + "<" + strType + ">", + List.of(strType), + () -> testCase(strType, randomAlphaOfLengthBetween(1, 10), equalTo(true)) + ) + ); + } + List errorsSuppliers = errorsForCasesWithoutExamples(suppliers, (v, p) -> "string"); + // Don't test null, as it is not allowed but the expected message is not a type error - so we check it separately in VerifierTests + return parameterSuppliersFromTypedData(errorsSuppliers.stream().filter(s -> s.types().contains(DataType.NULL) == false).toList()); + } + + private static TestCaseSupplier.TestCase testCase(DataType strType, String str, Matcher matcher) { + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(new BytesRef(str), strType, "query")), + "EndsWithEvaluator[str=Attribute[channel=0], suffix=Attribute[channel=1]]", + DataType.BOOLEAN, + matcher + ); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/QueryStringTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/QueryStringTests.java index b4b4ebcaacde6..f573e59ab205a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/QueryStringTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/fulltext/QueryStringTests.java @@ -10,61 +10,24 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import 
com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.FunctionName; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; -import org.hamcrest.Matcher; -import java.util.LinkedList; import java.util.List; import java.util.function.Supplier; -import static org.hamcrest.Matchers.equalTo; - @FunctionName("qstr") -public class QueryStringTests extends AbstractFunctionTestCase { +public class QueryStringTests extends NoneFieldFullTextFunctionTestCase { public QueryStringTests(@Name("TestCase") Supplier testCaseSupplier) { - this.testCase = testCaseSupplier.get(); + super(testCaseSupplier); } @ParametersFactory public static Iterable parameters() { - List suppliers = new LinkedList<>(); - for (DataType strType : DataType.stringTypes()) { - suppliers.add( - new TestCaseSupplier( - "<" + strType + ">", - List.of(strType), - () -> testCase(strType, randomAlphaOfLengthBetween(1, 10), equalTo(true)) - ) - ); - } - List errorsSuppliers = errorsForCasesWithoutExamples(suppliers, (v, p) -> "string"); - // Don't test null, as it is not allowed but the expected message is not a type error - so we check it separately in VerifierTests - return parameterSuppliersFromTypedData(errorsSuppliers.stream().filter(s -> s.types().contains(DataType.NULL) == false).toList()); - } - - public final void testFold() { - Expression expression = buildLiteralExpression(testCase); - if (testCase.getExpectedTypeError() != null) { - assertTypeResolutionFailure(expression); - return; - } - assertFalse("expected resolved", expression.typeResolved().unresolved()); - } - - private static TestCaseSupplier.TestCase testCase(DataType strType, String str, Matcher matcher) { - return new TestCaseSupplier.TestCase( - List.of(new TestCaseSupplier.TypedData(new BytesRef(str), strType, "query")), - "EndsWithEvaluator[str=Attribute[channel=0], suffix=Attribute[channel=1]]", - DataType.BOOLEAN, - matcher - ); + return generateParameters(); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/CategorizeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/CategorizeTests.java index f93389d5cb659..d29ac635e4bb7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/CategorizeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/grouping/CategorizeTests.java @@ -23,6 +23,12 @@ import static org.hamcrest.Matchers.equalTo; +/** + * Dummy test implementation for Categorize. Used just to generate documentation. + *
+ * <p>
+ *     Most test cases are currently skipped as this function can't build an evaluator.
+ * </p>
    + */ public class CategorizeTests extends AbstractScalarFunctionTestCase { public CategorizeTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); @@ -37,11 +43,11 @@ public static Iterable parameters() { "text with " + dataType.typeName(), List.of(dataType), () -> new TestCaseSupplier.TestCase( - List.of(new TestCaseSupplier.TypedData(new BytesRef("blah blah blah"), dataType, "f")), - "CategorizeEvaluator[v=Attribute[channel=0]]", - DataType.INTEGER, - equalTo(0) - ) + List.of(new TestCaseSupplier.TypedData(new BytesRef(""), dataType, "field")), + "", + DataType.KEYWORD, + equalTo(new BytesRef("")) + ).withoutEvaluator() ) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java index 65f5653f27e1a..11894cf5b847b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java @@ -14,7 +14,6 @@ import org.elasticsearch.geometry.Geometry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.plugin.EsqlCorePlugin; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.NumericUtils; @@ -397,9 +396,6 @@ protected static void dateNanos( DataType expectedDataType, BiFunction> matcher ) { - if (EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG.isEnabled() == false) { - return; - } cases.add( new TestCaseSupplier( name + "(epoch nanos)", diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java index e8f9f26a76f43..0d114b4964920 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java @@ -113,8 +113,8 @@ public static Iterable parameters() { "rhs", (l, r) -> ((Number) l).longValue() < ((Number) r).longValue(), DataType.BOOLEAN, - TestCaseSupplier.dateCases(), - TestCaseSupplier.dateCases(), + TestCaseSupplier.dateNanosCases(), + TestCaseSupplier.dateNanosCases(), List.of(), false ) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/fulltext/StringQuerySerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/fulltext/StringQuerySerializationTests.java deleted file mode 100644 index ff00a161e1bb1..0000000000000 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/fulltext/StringQuerySerializationTests.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.expression.predicate.operator.fulltext; - -import org.elasticsearch.xpack.esql.core.expression.predicate.fulltext.StringQueryPredicate; - -import java.io.IOException; - -public class StringQuerySerializationTests extends AbstractFulltextSerializationTests { - - private static final String COMMA = ","; - - @Override - protected final StringQueryPredicate createTestInstance() { - return new StringQueryPredicate(randomSource(), randomAlphaOfLength(randomIntBetween(1, 16)), randomOptionOrNull()); - } - - @Override - protected StringQueryPredicate mutateInstance(StringQueryPredicate instance) throws IOException { - var query = instance.query(); - var options = instance.options(); - if (randomBoolean()) { - query = randomValueOtherThan(query, () -> randomAlphaOfLength(randomIntBetween(1, 16))); - } else { - options = randomValueOtherThan(options, this::randomOptionOrNull); - } - return new StringQueryPredicate(instance.source(), query, options); - } -} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java index d3e1710a715af..9a1a30b892b22 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockWritables; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; @@ -23,10 +24,10 @@ import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.expression.ExpressionWritables; import org.elasticsearch.xpack.esql.expression.function.FieldAttributeTests; import org.elasticsearch.xpack.esql.expression.function.MetadataAttributeTests; import org.elasticsearch.xpack.esql.expression.function.ReferenceAttributeTests; -import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttributeTests; import org.elasticsearch.xpack.esql.session.Configuration; import org.elasticsearch.xpack.esql.type.EsFieldTests; @@ -280,9 +281,8 @@ private Column randomColumn() { static { List writeables = new ArrayList<>(); - writeables.addAll(Block.getNamedWriteables()); - writeables.addAll(Attribute.getNamedWriteables()); - writeables.add(UnsupportedAttribute.ENTRY); + writeables.addAll(BlockWritables.getNamedWriteables()); + writeables.addAll(ExpressionWritables.attributes()); REGISTRY = new NamedWriteableRegistry(new ArrayList<>(new HashSet<>(writeables))); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java index 073a51ee69114..86f5c812737b1 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.EsqlTestUtils.TestSearchStats; @@ -41,7 +42,9 @@ import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.index.EsIndex; import org.elasticsearch.xpack.esql.index.IndexResolution; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ExtractAggregateCommonFilter; import org.elasticsearch.xpack.esql.plan.logical.Enrich; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; import org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec; @@ -58,16 +61,20 @@ import org.elasticsearch.xpack.esql.planner.FilterTests; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; +import org.elasticsearch.xpack.esql.rule.Rule; import org.elasticsearch.xpack.esql.session.Configuration; import org.elasticsearch.xpack.esql.stats.Metrics; import org.elasticsearch.xpack.esql.stats.SearchContextStats; import org.elasticsearch.xpack.esql.stats.SearchStats; +import org.elasticsearch.xpack.kql.query.KqlQueryBuilder; import org.junit.Before; import java.io.IOException; +import java.util.ArrayList; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.function.Function; import static java.util.Arrays.asList; import static org.elasticsearch.compute.aggregation.AggregatorMode.FINAL; @@ -145,7 +152,7 @@ private Analyzer makeAnalyzer(String mappingFileName, EnrichResolution enrichRes return new Analyzer( new AnalyzerContext(config, new EsqlFunctionRegistry(), getIndexResult, enrichResolution), - new Verifier(new Metrics(new EsqlFunctionRegistry())) + new Verifier(new Metrics(new EsqlFunctionRegistry()), new XPackLicenseState(() -> 0L)) ); } @@ -378,6 +385,67 @@ public void testMultiCountAllWithFilter() { assertThat(plan.anyMatch(EsQueryExec.class::isInstance), is(true)); } + @SuppressWarnings("unchecked") + public void testSingleCountWithStatsFilter() { + // an optimizer that filters out the ExtractAggregateCommonFilter rule + var logicalOptimizer = new LogicalPlanOptimizer(new LogicalOptimizerContext(config)) { + @Override + protected List> batches() { + var oldBatches = super.batches(); + List> newBatches = new ArrayList<>(oldBatches.size()); + for (var batch : oldBatches) { + List> rules = new ArrayList<>(List.of(batch.rules())); + rules.removeIf(r -> r instanceof ExtractAggregateCommonFilter); + newBatches.add(batch.with(rules.toArray(Rule[]::new))); + } + return newBatches; + } + }; + var analyzer = makeAnalyzer("mapping-default.json"); + var plannerOptimizer = new TestPlannerOptimizer(config, analyzer, logicalOptimizer); + var plan = plannerOptimizer.plan(""" + from test + | stats c = count(hire_date) where emp_no < 10042 + """, IS_SV_STATS); + + var limit = as(plan, LimitExec.class); + var agg = 
as(limit.child(), AggregateExec.class); + assertThat(agg.getMode(), is(FINAL)); + var exchange = as(agg.child(), ExchangeExec.class); + var esStatsQuery = as(exchange.child(), EsStatsQueryExec.class); + + Function compact = s -> s.replaceAll("\\s+", ""); + assertThat(compact.apply(esStatsQuery.query().toString()), is(compact.apply(""" + { + "bool": { + "must": [ + { + "exists": { + "field": "hire_date", + "boost": 1.0 + } + }, + { + "esql_single_value": { + "field": "emp_no", + "next": { + "range": { + "emp_no": { + "lt": 10042, + "boost": 1.0 + } + } + }, + "source": "emp_no < 10042@2:36" + } + } + ], + "boost": 1.0 + } + } + """))); + } + /** * Expecting * LimitExec[1000[INTEGER]] @@ -678,7 +746,7 @@ public void testMatchFunctionMultipleWhereClauses() { * \_EsQueryExec[test], indexMode[standard], query[{"bool":{"must":[{"match":{"last_name":{"query":"Smith"}}}, * {"match":{"first_name":{"query":"John"}}}],"boost":1.0}}][_doc{f}#14], limit[1000], sort[] estimatedRowSize[324] */ - public void testMatchFunctionMultipleQstrClauses() { + public void testMatchFunctionMultipleMatchClauses() { String queryText = """ from test | where match(last_name, "Smith") and match(first_name, "John") @@ -698,6 +766,182 @@ public void testMatchFunctionMultipleQstrClauses() { assertThat(query.query().toString(), is(expected.toString())); } + /** + * Expecting + * LimitExec[1000[INTEGER]] + * \_ExchangeExec[[_meta_field{f}#8, emp_no{f}#2, first_name{f}#3, gender{f}#4, job{f}#9, job.raw{f}#10, languages{f}#5, last_na + * me{f}#6, long_noidx{f}#11, salary{f}#7],false] + * \_ProjectExec[[_meta_field{f}#8, emp_no{f}#2, first_name{f}#3, gender{f}#4, job{f}#9, job.raw{f}#10, languages{f}#5, last_na + * me{f}#6, long_noidx{f}#11, salary{f}#7]] + * \_FieldExtractExec[_meta_field{f}#8, emp_no{f}#2, first_name{f}#3, gen] + * \_EsQueryExec[test], indexMode[standard], query[{"kql":{"query":"last_name: Smith"}}] + */ + public void testKqlFunction() { + // Skip test if the kql function is not enabled. 
+ assumeTrue("kql function capability not available", EsqlCapabilities.Cap.KQL_FUNCTION.isEnabled()); + + var plan = plannerOptimizer.plan(""" + from test + | where kql("last_name: Smith") + """, IS_SV_STATS); + + var limit = as(plan, LimitExec.class); + var exchange = as(limit.child(), ExchangeExec.class); + var project = as(exchange.child(), ProjectExec.class); + var field = as(project.child(), FieldExtractExec.class); + var query = as(field.child(), EsQueryExec.class); + assertThat(query.limit().fold(), is(1000)); + var expected = kqlQueryBuilder("last_name: Smith"); + assertThat(query.query().toString(), is(expected.toString())); + } + + /** + * Expecting + * LimitExec[1000[INTEGER]] + * \_ExchangeExec[[_meta_field{f}#1419, emp_no{f}#1413, first_name{f}#1414, gender{f}#1415, job{f}#1420, job.raw{f}#1421, langua + * ges{f}#1416, last_name{f}#1417, long_noidx{f}#1422, salary{f}#1418],false] + * \_ProjectExec[[_meta_field{f}#1419, emp_no{f}#1413, first_name{f}#1414, gender{f}#1415, job{f}#1420, job.raw{f}#1421, langua + * ges{f}#1416, last_name{f}#1417, long_noidx{f}#1422, salary{f}#1418]] + * \_FieldExtractExec[_meta_field{f}#1419, emp_no{f}#1413, first_name{f}#] + * \_EsQueryExec[test], indexMode[standard], query[{"bool":{"must":[{"kql":{"query":"last_name: Smith"}} + * ,{"esql_single_value":{"field":"emp_no","next":{"range":{"emp_no":{"gt":10010,"boost":1.0}}},"source":"emp_no > 10010"}}], + * "boost":1.0}}][_doc{f}#1423], limit[1000], sort[] estimatedRowSize[324] + */ + public void testKqlFunctionConjunctionWhereOperands() { + // Skip test if the kql function is not enabled. + assumeTrue("kql function capability not available", EsqlCapabilities.Cap.KQL_FUNCTION.isEnabled()); + + String queryText = """ + from test + | where kql("last_name: Smith") and emp_no > 10010 + """; + var plan = plannerOptimizer.plan(queryText, IS_SV_STATS); + + var limit = as(plan, LimitExec.class); + var exchange = as(limit.child(), ExchangeExec.class); + var project = as(exchange.child(), ProjectExec.class); + var field = as(project.child(), FieldExtractExec.class); + var query = as(field.child(), EsQueryExec.class); + assertThat(query.limit().fold(), is(1000)); + + Source filterSource = new Source(2, 36, "emp_no > 10000"); + var range = wrapWithSingleQuery(queryText, QueryBuilders.rangeQuery("emp_no").gt(10010), "emp_no", filterSource); + var kqlQuery = kqlQueryBuilder("last_name: Smith"); + var expected = QueryBuilders.boolQuery().must(kqlQuery).must(range); + assertThat(query.query().toString(), is(expected.toString())); + } + + /** + * Expecting + * LimitExec[1000[INTEGER]] + * \_ExchangeExec[[!alias_integer, boolean{f}#4, byte{f}#5, constant_keyword-foo{f}#6, date{f}#7, double{f}#8, float{f}#9, half_ + * float{f}#10, integer{f}#12, ip{f}#13, keyword{f}#14, long{f}#15, scaled_float{f}#11, short{f}#17, text{f}#18, unsigned_long{f}#16], + * false] + * \_ProjectExec[[!alias_integer, boolean{f}#4, byte{f}#5, constant_keyword-foo{f}#6, date{f}#7, double{f}#8, float{f}#9, half_ + * float{f}#10, integer{f}#12, ip{f}#13, keyword{f}#14, long{f}#15, scaled_float{f}#11, short{f}#17, text{f}#18, unsigned_long{f}#16] + * \_FieldExtractExec[!alias_integer, boolean{f}#4, byte{f}#5, constant_k..] 
+ * \_EsQueryExec[test], indexMode[standard], query[{"bool":{"must":[{"kql":{"query":"last_name: Smith"}},{ + * "esql_single_value":{"field":"ip","next":{"terms":{"ip":["127.0.0.1/32"],"boost":1.0}}, + * "source":"cidr_match(ip, \"127.0.0.1/32\")@2:38"}}],"boost":1.0}}][_doc{f}#21], limit[1000], sort[] estimatedRowSize[354] + */ + public void testKqlFunctionWithFunctionsPushedToLucene() { + // Skip test if the kql function is not enabled. + assumeTrue("kql function capability not available", EsqlCapabilities.Cap.KQL_FUNCTION.isEnabled()); + + String queryText = """ + from test + | where kql("last_name: Smith") and cidr_match(ip, "127.0.0.1/32") + """; + var analyzer = makeAnalyzer("mapping-all-types.json"); + var plan = plannerOptimizer.plan(queryText, IS_SV_STATS, analyzer); + + var limit = as(plan, LimitExec.class); + var exchange = as(limit.child(), ExchangeExec.class); + var project = as(exchange.child(), ProjectExec.class); + var field = as(project.child(), FieldExtractExec.class); + var query = as(field.child(), EsQueryExec.class); + assertThat(query.limit().fold(), is(1000)); + + Source filterSource = new Source(2, 36, "cidr_match(ip, \"127.0.0.1/32\")"); + var terms = wrapWithSingleQuery(queryText, QueryBuilders.termsQuery("ip", "127.0.0.1/32"), "ip", filterSource); + var kqlQuery = kqlQueryBuilder("last_name: Smith"); + var expected = QueryBuilders.boolQuery().must(kqlQuery).must(terms); + assertThat(query.query().toString(), is(expected.toString())); + } + + /** + * Expecting + * LimitExec[1000[INTEGER]] + * \_ExchangeExec[[_meta_field{f}#1163, emp_no{f}#1157, first_name{f}#1158, gender{f}#1159, job{f}#1164, job.raw{f}#1165, langua + * ges{f}#1160, last_name{f}#1161, long_noidx{f}#1166, salary{f}#1162],false] + * \_ProjectExec[[_meta_field{f}#1163, emp_no{f}#1157, first_name{f}#1158, gender{f}#1159, job{f}#1164, job.raw{f}#1165, langua + * ges{f}#1160, last_name{f}#1161, long_noidx{f}#1166, salary{f}#1162]] + * \_FieldExtractExec[_meta_field{f}#1163, emp_no{f}#1157, first_name{f}#] + * \_EsQueryExec[test], indexMode[standard], + * query[{"bool":{"must":[{"kql":{"query":"last_name: Smith"}}, + * {"esql_single_value":{"field":"emp_no","next":{"range":{"emp_no":{"gt":10010,"boost":1.0}}},"source":"emp_no > 10010@3:9"}}], + * "boost":1.0}}][_doc{f}#1167], limit[1000], sort[] estimatedRowSize[324] + */ + public void testKqlFunctionMultipleWhereClauses() { + // Skip test if the kql function is not enabled. 
+ assumeTrue("kql function capability not available", EsqlCapabilities.Cap.KQL_FUNCTION.isEnabled()); + + String queryText = """ + from test + | where kql("last_name: Smith") + | where emp_no > 10010 + """; + var plan = plannerOptimizer.plan(queryText, IS_SV_STATS); + + var limit = as(plan, LimitExec.class); + var exchange = as(limit.child(), ExchangeExec.class); + var project = as(exchange.child(), ProjectExec.class); + var field = as(project.child(), FieldExtractExec.class); + var query = as(field.child(), EsQueryExec.class); + assertThat(query.limit().fold(), is(1000)); + + Source filterSource = new Source(3, 8, "emp_no > 10000"); + var range = wrapWithSingleQuery(queryText, QueryBuilders.rangeQuery("emp_no").gt(10010), "emp_no", filterSource); + var kqlQuery = kqlQueryBuilder("last_name: Smith"); + var expected = QueryBuilders.boolQuery().must(kqlQuery).must(range); + assertThat(query.query().toString(), is(expected.toString())); + } + + /** + * Expecting + * LimitExec[1000[INTEGER]] + * \_ExchangeExec[[_meta_field{f}#8, emp_no{f}#2, first_name{f}#3, gender{f}#4, job{f}#9, job.raw{f}#10, languages{f}#5, last_na + * me{f}#6, long_noidx{f}#11, salary{f}#7],false] + * \_ProjectExec[[_meta_field{f}#8, emp_no{f}#2, first_name{f}#3, gender{f}#4, job{f}#9, job.raw{f}#10, languages{f}#5, last_na + * me{f}#6, long_noidx{f}#11, salary{f}#7]] + * \_FieldExtractExec[_meta_field{f}#8, emp_no{f}#2, first_name{f}#3, gen] + * \_EsQueryExec[test], indexMode[standard], query[{"bool": {"must":[ + * {"kql":{"query":"last_name: Smith"}}, + * {"kql":{"query":"emp_no > 10010"}}],"boost":1.0}}] + */ + public void testKqlFunctionMultipleKqlClauses() { + // Skip test if the kql function is not enabled. + assumeTrue("kql function capability not available", EsqlCapabilities.Cap.KQL_FUNCTION.isEnabled()); + + String queryText = """ + from test + | where kql("last_name: Smith") and kql("emp_no > 10010") + """; + var plan = plannerOptimizer.plan(queryText, IS_SV_STATS); + + var limit = as(plan, LimitExec.class); + var exchange = as(limit.child(), ExchangeExec.class); + var project = as(exchange.child(), ProjectExec.class); + var field = as(project.child(), FieldExtractExec.class); + var query = as(field.child(), EsQueryExec.class); + assertThat(query.limit().fold(), is(1000)); + + var kqlQueryLeft = kqlQueryBuilder("last_name: Smith"); + var kqlQueryRight = kqlQueryBuilder("emp_no > 10010"); + var expected = QueryBuilders.boolQuery().must(kqlQueryLeft).must(kqlQueryRight); + assertThat(query.query().toString(), is(expected.toString())); + } + // optimizer doesn't know yet how to break down different multi count public void testCountFieldsAndAllWithFilter() { var plan = plannerOptimizer.plan(""" @@ -1092,8 +1336,6 @@ public void testMissingFieldsDoNotGetExtracted() { * estimatedRowSize[324] */ public void testSingleMatchFilterPushdown() { - assumeTrue("skipping because MATCH operator is not enabled", EsqlCapabilities.Cap.MATCH_OPERATOR_COLON.isEnabled()); - var plan = plannerOptimizer.plan(""" from test | where first_name:"Anna" @@ -1124,8 +1366,6 @@ public void testSingleMatchFilterPushdown() { * [_doc{f}#22], limit[1000], sort[[FieldSort[field=emp_no{f}#12, direction=ASC, nulls=LAST]]] estimatedRowSize[336] */ public void testMultipleMatchFilterPushdown() { - assumeTrue("skipping because MATCH operator is not enabled", EsqlCapabilities.Cap.MATCH_OPERATOR_COLON.isEnabled()); - String query = """ from test | where first_name:"Anna" and first_name:"Anneke" @@ -1170,4 +1410,8 @@ private Stat queryStatsFor(PhysicalPlan 
plan) { protected List filteredWarnings() { return withDefaultLimitWarning(super.filteredWarnings()); } + + private static KqlQueryBuilder kqlQueryBuilder(String query) { + return new KqlQueryBuilder(query); + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index d9a0f9ad57fb1..57d0c7432f97b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.VerificationException; +import org.elasticsearch.xpack.esql.action.EsqlCapabilities; import org.elasticsearch.xpack.esql.analysis.Analyzer; import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils; @@ -57,6 +58,7 @@ import org.elasticsearch.xpack.esql.expression.function.aggregate.ToPartial; import org.elasticsearch.xpack.esql.expression.function.aggregate.Values; import org.elasticsearch.xpack.esql.expression.function.grouping.Bucket; +import org.elasticsearch.xpack.esql.expression.function.grouping.Categorize; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToInteger; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToLong; @@ -111,7 +113,7 @@ import org.elasticsearch.xpack.esql.plan.logical.UnaryPlan; import org.elasticsearch.xpack.esql.plan.logical.join.InlineJoin; import org.elasticsearch.xpack.esql.plan.logical.join.Join; -import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes; import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; @@ -840,6 +842,265 @@ public void testReplaceStatsFilteredAggWithEvalSingleAggWithGroup() { var source = as(aggregate.child(), EsRelation.class); } + public void testExtractStatsCommonFilter() { + var plan = plan(""" + from test + | stats m = min(salary) where emp_no > 1, + max(salary) where emp_no > 1 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(2)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), is(Literal.TRUE)); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), is(Literal.TRUE)); + + var filter = as(agg.child(), Filter.class); + assertThat(Expressions.name(filter.condition()), is("emp_no > 1")); + + var source = as(filter.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilterUsingAliases() { + var plan = plan(""" + from test + | eval eno = emp_no + | drop emp_no + | stats min(salary) where eno > 1, + max(salary) where eno > 1 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(2)); + + var alias = 
as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), is(Literal.TRUE)); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), is(Literal.TRUE)); + + var filter = as(agg.child(), Filter.class); + assertThat(Expressions.name(filter.condition()), is("eno > 1")); + + var source = as(filter.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilterUsingJustOneAlias() { + var plan = plan(""" + from test + | eval eno = emp_no + | stats min(salary) where emp_no > 1, + max(salary) where eno > 1 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(2)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), is(Literal.TRUE)); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), is(Literal.TRUE)); + + var filter = as(agg.child(), Filter.class); + var gt = as(filter.condition(), GreaterThan.class); + assertThat(Expressions.name(gt.left()), is("emp_no")); + assertTrue(gt.right().foldable()); + assertThat(gt.right().fold(), is(1)); + + var source = as(filter.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilterSkippedNotSameFilter() { + var plan = plan(""" + from test + | stats min(salary) where emp_no > 1, + max(salary) where emp_no > 2 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(2)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), instanceOf(BinaryComparison.class)); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), instanceOf(BinaryComparison.class)); + + var source = as(agg.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilterSkippedOnLackingFilter() { + var plan = plan(""" + from test + | stats min(salary), + max(salary) where emp_no > 2 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(2)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), is(Literal.TRUE)); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), instanceOf(BinaryComparison.class)); + + var source = as(agg.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilterSkippedWithGroups() { + var plan = plan(""" + from test + | stats min(salary) where emp_no > 2, + max(salary) where emp_no > 2 by first_name + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(3)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), instanceOf(BinaryComparison.class)); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), 
instanceOf(BinaryComparison.class)); + + var source = as(agg.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilterNormalizeAndCombineWithExistingFilter() { + var plan = plan(""" + from test + | where emp_no > 3 + | stats min(salary) where emp_no > 2, + max(salary) where 2 < emp_no + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(2)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), is(Literal.TRUE)); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), is(Literal.TRUE)); + + var filter = as(agg.child(), Filter.class); + assertThat(Expressions.name(filter.condition()), is("emp_no > 3")); + + var source = as(filter.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilterInConjunction() { + var plan = plan(""" + from test + | stats min(salary) where emp_no > 2 and first_name == "John", + max(salary) where emp_no > 1 + 1 and length(last_name) < 19 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(2)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(Expressions.name(aggFunc.filter()), is("first_name == \"John\"")); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(Expressions.name(aggFunc.filter()), is("length(last_name) < 19")); + + var filter = as(agg.child(), Filter.class); + var gt = as(filter.condition(), GreaterThan.class); // name is "emp_no > 1 + 1" + assertThat(Expressions.name(gt.left()), is("emp_no")); + assertTrue(gt.right().foldable()); + assertThat(gt.right().fold(), is(2)); + + var source = as(filter.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilterInConjunctionWithMultipleCommonConjunctions() { + var plan = plan(""" + from test + | stats min(salary) where emp_no < 10 and first_name == "John" and last_name == "Doe", + max(salary) where emp_no - 1 < 2 + 7 and length(last_name) < 19 and last_name == "Doe" + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(2)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(Expressions.name(aggFunc.filter()), is("first_name == \"John\"")); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(Expressions.name(aggFunc.filter()), is("length(last_name) < 19")); + + var filter = as(agg.child(), Filter.class); + var and = as(filter.condition(), And.class); + + var lt = as(and.left(), LessThan.class); + assertThat(Expressions.name(lt.left()), is("emp_no")); + assertTrue(lt.right().foldable()); + assertThat(lt.right().fold(), is(10)); + + var equals = as(and.right(), Equals.class); + assertThat(Expressions.name(equals.left()), is("last_name")); + assertTrue(equals.right().foldable()); + assertThat(equals.right().fold(), is(BytesRefs.toBytesRef("Doe"))); + + var source = as(filter.child(), EsRelation.class); + } + + public void testExtractStatsCommonFilterSkippedDueToDisjunction() { + // same query as in testExtractStatsCommonFilterInConjunction, 
except for the OR in the filter + var plan = plan(""" + from test + | stats min(salary) where emp_no > 2 OR first_name == "John", + max(salary) where emp_no > 1 + 1 and length(last_name) < 19 + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.aggregates().size(), is(2)); + + var alias = as(agg.aggregates().get(0), Alias.class); + var aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), instanceOf(Or.class)); + + alias = as(agg.aggregates().get(1), Alias.class); + aggFunc = as(alias.child(), AggregateFunction.class); + assertThat(aggFunc.filter(), instanceOf(And.class)); + + var source = as(agg.child(), EsRelation.class); + } + public void testQlComparisonOptimizationsApply() { var plan = plan(""" from test @@ -944,6 +1205,35 @@ public void testCombineProjectionWithAggregationFirstAndAliasedGroupingUsedInAgg assertThat(Expressions.names(agg.groupings()), contains("first_name")); } + /** + * Expects + * Limit[1000[INTEGER]] + * \_Aggregate[STANDARD,[CATEGORIZE(first_name{f}#18) AS cat],[SUM(salary{f}#22,true[BOOLEAN]) AS s, cat{r}#10]] + * \_EsRelation[test][_meta_field{f}#23, emp_no{f}#17, first_name{f}#18, ..] + */ + public void testCombineProjectionWithCategorizeGrouping() { + assumeTrue("requires Categorize capability", EsqlCapabilities.Cap.CATEGORIZE_V4.isEnabled()); + + var plan = plan(""" + from test + | eval k = first_name, k1 = k + | stats s = sum(salary) by cat = CATEGORIZE(k) + | keep s, cat + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + assertThat(agg.child(), instanceOf(EsRelation.class)); + + assertThat(Expressions.names(agg.aggregates()), contains("s", "cat")); + assertThat(Expressions.names(agg.groupings()), contains("cat")); + + var categorizeAlias = as(agg.groupings().get(0), Alias.class); + var categorize = as(categorizeAlias.child(), Categorize.class); + var categorizeField = as(categorize.field(), FieldAttribute.class); + assertThat(categorizeField.name(), is("first_name")); + } + /** * Expects * Limit[1000[INTEGER]] @@ -1882,7 +2172,7 @@ public void testLimitThenSortBeforeMvExpand() { mvExpand = as(topN.child(), MvExpand.class); var limit = as(mvExpand.child(), Limit.class); assertThat(limit.limit().fold(), equalTo(7300)); - as(limit.child(), Row.class); + as(limit.child(), LocalRelation.class); } /** @@ -2027,7 +2317,7 @@ public void testSortMvExpand() { var expand = as(plan, MvExpand.class); assertThat(expand.limit(), equalTo(1000)); var topN = as(expand.child(), TopN.class); - var row = as(topN.child(), Row.class); + var row = as(topN.child(), LocalRelation.class); } /** @@ -2068,7 +2358,7 @@ public void testWhereMvExpand() { assertThat(expand.limit(), equalTo(1000)); var limit2 = as(expand.child(), Limit.class); assertThat(limit2.limit().fold(), is(1000)); - var row = as(limit2.child(), Row.class); + var row = as(limit2.child(), LocalRelation.class); } private static List orderNames(TopN topN) { @@ -2304,7 +2594,7 @@ public void testSimplifyRLikeMatchAll() { public void testRLikeWrongPattern() { String query = "from test | where first_name rlike \"(?i)(^|[^a-zA-Z0-9_-])nmap($|\\\\.)\""; - String error = "line 1:20: Invalid regex pattern for RLIKE [(?i)(^|[^a-zA-Z0-9_-])nmap($|\\.)]: " + String error = "line 1:19: Invalid regex pattern for RLIKE [(?i)(^|[^a-zA-Z0-9_-])nmap($|\\.)]: " + "[invalid range: from (95) cannot be > to (93)]"; ParsingException e = expectThrows(ParsingException.class, () -> plan(query)); 
assertThat(e.getMessage(), is(error)); @@ -2312,7 +2602,7 @@ public void testRLikeWrongPattern() { public void testLikeWrongPattern() { String query = "from test | where first_name like \"(?i)(^|[^a-zA-Z0-9_-])nmap($|\\\\.)\""; - String error = "line 1:20: Invalid pattern for LIKE [(?i)(^|[^a-zA-Z0-9_-])nmap($|\\.)]: " + String error = "line 1:19: Invalid pattern for LIKE [(?i)(^|[^a-zA-Z0-9_-])nmap($|\\.)]: " + "[Invalid sequence - escape character is not followed by special wildcard char]"; ParsingException e = expectThrows(ParsingException.class, () -> plan(query)); assertThat(e.getMessage(), is(error)); @@ -3286,7 +3576,7 @@ public void testMvExpandFoldable() { var filterProp = ((GreaterThan) filter.condition()).left(); assertTrue(expand.expanded().semanticEquals(filterProp)); assertFalse(expand.target().semanticEquals(filterProp)); - var row = as(expand.child(), Row.class); + var row = as(expand.child(), LocalRelation.class); } /** @@ -3305,7 +3595,7 @@ public void testRenameStatsDropGroup() { var limit = as(plan, Limit.class); var agg = as(limit.child(), Aggregate.class); assertThat(Expressions.names(agg.groupings()), contains("a")); - var row = as(agg.child(), Row.class); + var row = as(agg.child(), LocalRelation.class); } /** @@ -3324,7 +3614,7 @@ public void testMultipleRenameStatsDropGroup() { var limit = as(plan, Limit.class); var agg = as(limit.child(), Aggregate.class); assertThat(Expressions.names(agg.groupings()), contains("a", "b")); - var row = as(agg.child(), Row.class); + var row = as(agg.child(), LocalRelation.class); } /** @@ -3650,6 +3940,41 @@ public void testNestedExpressionsInGroups() { assertThat(eval.fields().get(0).name(), is("emp_no % 2")); } + /** + * Expects + * Limit[1000[INTEGER]] + * \_Aggregate[STANDARD,[CATEGORIZE(CATEGORIZE(CONCAT(first_name, "abc")){r$}#18) AS CATEGORIZE(CONCAT(first_name, "abc"))],[CO + * UNT(salary{f}#13,true[BOOLEAN]) AS c, CATEGORIZE(CONCAT(first_name, "abc")){r}#3]] + * \_Eval[[CONCAT(first_name{f}#9,[61 62 63][KEYWORD]) AS CATEGORIZE(CONCAT(first_name, "abc"))]] + * \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..] 
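+ * (The nested CONCAT is first materialized by an Eval under a synthetic alias, and CATEGORIZE then groups on the resulting attribute, as the assertions below verify.)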
+ */ + public void testNestedExpressionsInGroupsWithCategorize() { + assumeTrue("requires Categorize capability", EsqlCapabilities.Cap.CATEGORIZE_V4.isEnabled()); + + var plan = optimizedPlan(""" + from test + | stats c = count(salary) by CATEGORIZE(CONCAT(first_name, "abc")) + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + var groupings = agg.groupings(); + var categorizeAlias = as(groupings.get(0), Alias.class); + var categorize = as(categorizeAlias.child(), Categorize.class); + var aggs = agg.aggregates(); + assertThat(aggs.get(1), is(categorizeAlias.toAttribute())); + + var eval = as(agg.child(), Eval.class); + assertThat(eval.fields(), hasSize(1)); + var evalFieldAlias = as(eval.fields().get(0), Alias.class); + var evalField = as(evalFieldAlias.child(), Concat.class); + + assertThat(evalFieldAlias.name(), is("CATEGORIZE(CONCAT(first_name, \"abc\"))")); + assertThat(categorize.field(), is(evalFieldAlias.toAttribute())); + assertThat(evalField.source().text(), is("CONCAT(first_name, \"abc\")")); + assertThat(categorizeAlias.source(), is(evalFieldAlias.source())); + } + /** * Expects * Limit[1000[INTEGER]] @@ -5365,6 +5690,7 @@ protected List filteredWarnings() { * 9]]], BytesRefVectorBlock[vector=BytesRefArrayVector[positions=10]]]] * } */ + @AwaitsFix(bugUrl = "lookup functionality is not yet implemented") public void testLookupSimple() { String query = """ FROM test @@ -5391,7 +5717,7 @@ public void testLookupSimple() { var limit = as(left.child(), Limit.class); assertThat(limit.limit().fold(), equalTo(1000)); - assertThat(join.config().type(), equalTo(JoinType.LEFT)); + assertThat(join.config().type(), equalTo(JoinTypes.LEFT)); assertThat(join.config().matchFields().stream().map(Object::toString).toList(), matchesList().item(startsWith("int{r}"))); assertThat(join.config().leftFields().size(), equalTo(1)); assertThat(join.config().rightFields().size(), equalTo(1)); @@ -5444,6 +5770,7 @@ public void testLookupSimple() { * 9]]], BytesRefVectorBlock[vector=BytesRefArrayVector[positions=10]]]] * } */ + @AwaitsFix(bugUrl = "lookup functionality is not yet implemented") public void testLookupStats() { String query = """ FROM test @@ -5479,7 +5806,7 @@ public void testLookupStats() { assertThat(left.output().toString(), containsString("int{r}")); as(left.child(), EsRelation.class); - assertThat(join.config().type(), equalTo(JoinType.LEFT)); + assertThat(join.config().type(), equalTo(JoinTypes.LEFT)); assertThat(join.config().matchFields().stream().map(Object::toString).toList(), matchesList().item(startsWith("int{r}"))); assertThat(join.config().leftFields().size(), equalTo(1)); assertThat(join.config().rightFields().size(), equalTo(1)); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index eb115ed7b2948..1f131f79c3d0e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.xpack.esql.EsqlTestUtils.TestConfigurableSearchStats; import org.elasticsearch.xpack.esql.EsqlTestUtils.TestConfigurableSearchStats.Config; import org.elasticsearch.xpack.esql.VerificationException; +import org.elasticsearch.xpack.esql.action.EsqlCapabilities; import 
org.elasticsearch.xpack.esql.analysis.Analyzer; import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.analysis.EnrichResolution; @@ -63,6 +64,7 @@ import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialAggregateFunction; import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroid; import org.elasticsearch.xpack.esql.expression.function.aggregate.Sum; +import org.elasticsearch.xpack.esql.expression.function.fulltext.Match; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Round; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialContains; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialDisjoint; @@ -93,7 +95,7 @@ import org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.TopN; import org.elasticsearch.xpack.esql.plan.logical.join.Join; -import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; @@ -114,7 +116,6 @@ import org.elasticsearch.xpack.esql.plan.physical.LocalSourceExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.plan.physical.ProjectExec; -import org.elasticsearch.xpack.esql.plan.physical.RowExec; import org.elasticsearch.xpack.esql.plan.physical.TopNExec; import org.elasticsearch.xpack.esql.plan.physical.UnaryExec; import org.elasticsearch.xpack.esql.planner.PlannerUtils; @@ -2751,7 +2752,7 @@ public void testSpatialTypesAndStatsUseDocValuesNestedLiteral() { assertThat("No groupings in aggregation", agg.groupings().size(), equalTo(0)); assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, false); var eval = as(agg.child(), EvalExec.class); - as(eval.child(), RowExec.class); + as(eval.child(), LocalSourceExec.class); // Now optimize the plan and assert the same plan again, since no FieldExtractExec is added var optimized = optimizedPlan(plan); @@ -2765,7 +2766,7 @@ public void testSpatialTypesAndStatsUseDocValuesNestedLiteral() { assertThat("No groupings in aggregation", agg.groupings().size(), equalTo(0)); assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, false); eval = as(agg.child(), EvalExec.class); - as(eval.child(), RowExec.class); + as(eval.child(), LocalSourceExec.class); } /** @@ -6423,11 +6424,12 @@ public void testMaxQueryDepthPlusExpressionDepth() { assertThat(e.getMessage(), containsString("ESQL statement exceeded the maximum query depth allowed (" + MAX_QUERY_DEPTH + ")")); } + @AwaitsFix(bugUrl = "lookup functionality is not yet implemented") public void testLookupSimple() { String query = """ FROM test | RENAME languages AS int - | LOOKUP int_number_names ON int"""; + | LOOKUP_🐔 int_number_names ON int"""; if (Build.current().isSnapshot() == false) { var e = expectThrows(ParsingException.class, () -> analyze(query)); assertThat(e.getMessage(), containsString("line 3:3: mismatched input 'LOOKUP' expecting {")); @@ -6468,18 +6470,19 @@ public void testLookupSimple() { * \_EsQueryExec[...] 
* } */ + @AwaitsFix(bugUrl = "lookup functionality is not yet implemented") public void testLookupThenProject() { String query = """ FROM employees | SORT emp_no | LIMIT 4 | RENAME languages AS int - | LOOKUP int_number_names ON int + | LOOKUP_🐔 int_number_names ON int | RENAME int AS languages, name AS lang_name | KEEP emp_no, languages, lang_name"""; if (Build.current().isSnapshot() == false) { var e = expectThrows(ParsingException.class, () -> analyze(query)); - assertThat(e.getMessage(), containsString("line 5:3: mismatched input 'LOOKUP' expecting {")); + assertThat(e.getMessage(), containsString("line 5:3: mismatched input 'LOOKUP_🐔' expecting {")); return; } PhysicalPlan plan = optimizedPlan(physicalPlan(query)); @@ -6526,17 +6529,18 @@ public void testLookupThenProject() { * \_LocalRelation[[int{f}#24, name{f}#25],[...]] * } */ + @AwaitsFix(bugUrl = "lookup functionality is not yet implemented") public void testLookupThenTopN() { String query = """ FROM employees | RENAME languages AS int - | LOOKUP int_number_names ON int + | LOOKUP_🐔 int_number_names ON int | RENAME name AS languages | KEEP languages, emp_no | SORT languages ASC, emp_no ASC"""; if (Build.current().isSnapshot() == false) { var e = expectThrows(ParsingException.class, () -> analyze(query)); - assertThat(e.getMessage(), containsString("line 3:3: mismatched input 'LOOKUP' expecting {")); + assertThat(e.getMessage(), containsString("line 3:3: mismatched input 'LOOKUP_🐔' expecting {")); return; } var plan = physicalPlan(query); @@ -6553,7 +6557,7 @@ public void testLookupThenTopN() { matchesList().item(startsWith("name{f}")).item(startsWith("emp_no{f}")) ); Join join = as(innerTopN.child(), Join.class); - assertThat(join.config().type(), equalTo(JoinType.LEFT)); + assertThat(join.config().type(), equalTo(JoinTypes.LEFT)); assertMap(join.config().matchFields().stream().map(Objects::toString).toList(), matchesList().item(startsWith("int{r}"))); Project innerProject = as(join.left(), Project.class); @@ -6579,6 +6583,66 @@ public void testLookupThenTopN() { ); } + public void testScore() { + assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); + var plan = physicalPlan(""" + from test metadata _score + | where match(first_name, "john") + | keep _score + """); + + ProjectExec outerProject = as(plan, ProjectExec.class); + LimitExec limitExec = as(outerProject.child(), LimitExec.class); + ExchangeExec exchange = as(limitExec.child(), ExchangeExec.class); + FragmentExec frag = as(exchange.child(), FragmentExec.class); + + LogicalPlan opt = logicalOptimizer.optimize(frag.fragment()); + Limit limit = as(opt, Limit.class); + Filter filter = as(limit.child(), Filter.class); + + Match match = as(filter.condition(), Match.class); + assertTrue(match.field() instanceof FieldAttribute); + assertEquals("first_name", ((FieldAttribute) match.field()).field().getName()); + + EsRelation esRelation = as(filter.child(), EsRelation.class); + assertTrue(esRelation.optimized()); + assertTrue(esRelation.resolved()); + assertTrue(esRelation.output().stream().anyMatch(a -> a.name().equals(MetadataAttribute.SCORE) && a instanceof MetadataAttribute)); + } + + public void testScoreTopN() { + assumeTrue("'METADATA _score' is disabled", EsqlCapabilities.Cap.METADATA_SCORE.isEnabled()); + var plan = physicalPlan(""" + from test metadata _score + | where match(first_name, "john") + | keep _score + | sort _score desc + """); + + ProjectExec projectExec = as(plan, ProjectExec.class); + TopNExec topNExec = 
as(projectExec.child(), TopNExec.class); + ExchangeExec exchange = as(topNExec.child(), ExchangeExec.class); + FragmentExec frag = as(exchange.child(), FragmentExec.class); + + LogicalPlan opt = logicalOptimizer.optimize(frag.fragment()); + TopN topN = as(opt, TopN.class); + List<Order> order = topN.order(); + Order scoreOrder = order.getFirst(); + assertEquals(Order.OrderDirection.DESC, scoreOrder.direction()); + Expression child = scoreOrder.child(); + assertTrue(child instanceof MetadataAttribute ma && ma.name().equals(MetadataAttribute.SCORE)); + Filter filter = as(topN.child(), Filter.class); + + Match match = as(filter.condition(), Match.class); + assertTrue(match.field() instanceof FieldAttribute); + assertEquals("first_name", ((FieldAttribute) match.field()).field().getName()); + + EsRelation esRelation = as(filter.child(), EsRelation.class); + assertTrue(esRelation.optimized()); + assertTrue(esRelation.resolved()); + assertTrue(esRelation.output().stream().anyMatch(a -> a.name().equals(MetadataAttribute.SCORE) && a instanceof MetadataAttribute)); + } + @SuppressWarnings("SameParameterValue") private static void assertFilterCondition( Filter filter, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/TestPlannerOptimizer.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/TestPlannerOptimizer.java index 595f0aaa91f0d..9fe479dbb8625 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/TestPlannerOptimizer.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/TestPlannerOptimizer.java @@ -9,7 +9,6 @@ import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.analysis.Analyzer; -import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.plan.physical.EstimatesRowSize; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; @@ -23,19 +22,22 @@ public class TestPlannerOptimizer { private final Analyzer analyzer; private final LogicalPlanOptimizer logicalOptimizer; private final PhysicalPlanOptimizer physicalPlanOptimizer; - private final EsqlFunctionRegistry functionRegistry; private final Mapper mapper; private final Configuration config; public TestPlannerOptimizer(Configuration config, Analyzer analyzer) { + this(config, analyzer, new LogicalPlanOptimizer(new LogicalOptimizerContext(config))); + } + + public TestPlannerOptimizer(Configuration config, Analyzer analyzer, LogicalPlanOptimizer logicalOptimizer) { this.analyzer = analyzer; this.config = config; + this.logicalOptimizer = logicalOptimizer; parser = new EsqlParser(); - logicalOptimizer = new LogicalPlanOptimizer(new LogicalOptimizerContext(config)); physicalPlanOptimizer = new PhysicalPlanOptimizer(new PhysicalOptimizerContext(config)); - functionRegistry = new EsqlFunctionRegistry(); mapper = new Mapper(); + } public PhysicalPlan plan(String query) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/FoldNullTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/FoldNullTests.java index 89117b5d4e729..ae31576184938 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/FoldNullTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/FoldNullTests.java @@ -28,6 +28,8 @@ import
org.elasticsearch.xpack.esql.expression.function.aggregate.Percentile; import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroid; import org.elasticsearch.xpack.esql.expression.function.aggregate.Sum; +import org.elasticsearch.xpack.esql.expression.function.grouping.Bucket; +import org.elasticsearch.xpack.esql.expression.function.grouping.Categorize; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToString; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateExtract; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateFormat; @@ -267,6 +269,17 @@ public void testNullFoldableDoesNotApplyToIsNullAndNotNull() { } } + public void testNullBucketGetsFolded() { + FoldNull foldNull = new FoldNull(); + assertEquals(NULL, foldNull.rule(new Bucket(EMPTY, NULL, NULL, NULL, NULL))); + } + + public void testNullCategorizeGroupingNotFolded() { + FoldNull foldNull = new FoldNull(); + Categorize categorize = new Categorize(EMPTY, NULL); + assertEquals(categorize, foldNull.rule(categorize)); + } + private void assertNullLiteral(Expression expression) { assertEquals(Literal.class, expression.getClass()); assertNull(expression.fold()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushTopNToSourceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushTopNToSourceTests.java index 98f0af8e4b8e6..2429bcb1a1b04 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushTopNToSourceTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/PushTopNToSourceTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; import org.elasticsearch.xpack.esql.core.expression.Nullability; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -64,6 +65,13 @@ public void testSimpleSortField() { assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); } + public void testSimpleScoreSortField() { + // FROM index METADATA _score | SORT _score | LIMIT 10 + var query = from("index").metadata("_score", DOUBLE, false).scoreSort().limit(10); + assertPushdownSort(query); + assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); + } + public void testSimpleSortMultipleFields() { // FROM index | SORT field, integer, double | LIMIT 10 var query = from("index").sort("field").sort("integer").sort("double").limit(10); @@ -71,6 +79,13 @@ public void testSimpleSortMultipleFields() { assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); } + public void testSimpleSortMultipleFieldsAndScore() { + // FROM index | SORT field, integer, double, _score | LIMIT 10 + var query = from("index").metadata("_score", DOUBLE, false).sort("field").sort("integer").sort("double").scoreSort().limit(10); + assertPushdownSort(query); + assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); + } + public void testSimpleSortFieldAndEvalLiteral() { // FROM index | EVAL x = 1 | SORT field | LIMIT 10 var query = from("index").eval("x", e -> e.i(1)).sort("field").limit(10); @@ -78,6 +93,13 @@ public 
void testSimpleSortFieldAndEvalLiteral() { assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); } + public void testSimpleSortFieldScoreAndEvalLiteral() { + // FROM index METADATA _score | EVAL x = 1 | SORT field, _score | LIMIT 10 + var query = from("index").metadata("_score", DOUBLE, false).eval("x", e -> e.i(1)).sort("field").scoreSort().limit(10); + assertPushdownSort(query, List.of(EvalExec.class, EsQueryExec.class)); + assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); + } + public void testSimpleSortFieldWithAlias() { // FROM index | EVAL x = field | SORT field | LIMIT 10 var query = from("index").eval("x", b -> b.field("field")).sort("field").limit(10); @@ -98,6 +120,21 @@ public void testSimpleSortMultipleFieldsWithAliases() { assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); } + public void testSimpleSortMultipleFieldsWithAliasesAndScore() { + // FROM index | EVAL x = field, y = integer, z = double | SORT field, integer, double, _score | LIMIT 10 + var query = from("index").metadata("_score", DOUBLE, false) + .eval("x", b -> b.field("field")) + .eval("y", b -> b.field("integer")) + .eval("z", b -> b.field("double")) + .sort("field") + .sort("integer") + .sort("double") + .scoreSort() + .limit(10); + assertPushdownSort(query, List.of(EvalExec.class, EsQueryExec.class)); + assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); + } + public void testSimpleSortFieldAsAlias() { // FROM index | EVAL x = field | SORT x | LIMIT 10 var query = from("index").eval("x", b -> b.field("field")).sort("x").limit(10); @@ -105,6 +142,13 @@ public void testSimpleSortFieldAsAlias() { assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); } + public void testSimpleSortFieldAsAliasAndScore() { + // FROM index METADATA _score | EVAL x = field | SORT x, _score | LIMIT 10 + var query = from("index").metadata("_score", DOUBLE, false).eval("x", b -> b.field("field")).sort("x").scoreSort().limit(10); + assertPushdownSort(query, Map.of("x", "field"), List.of(EvalExec.class, EsQueryExec.class)); + assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); + } + public void testSimpleSortFieldAndEvalSumLiterals() { // FROM index | EVAL sum = 1 + 2 | SORT field | LIMIT 10 var query = from("index").eval("sum", b -> b.add(b.i(1), b.i(2))).sort("field").limit(10); @@ -112,6 +156,17 @@ public void testSimpleSortFieldAndEvalSumLiterals() { assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); } + public void testSimpleSortFieldAndEvalSumLiteralsAndScore() { + // FROM index METADATA _score | EVAL sum = 1 + 2 | SORT field, _score | LIMIT 10 + var query = from("index").metadata("_score", DOUBLE, false) + .eval("sum", b -> b.add(b.i(1), b.i(2))) + .sort("field") + .scoreSort() + .limit(10); + assertPushdownSort(query, List.of(EvalExec.class, EsQueryExec.class)); + assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); + } + public void testSimpleSortFieldAndEvalSumLiteralAndField() { // FROM index | EVAL sum = 1 + integer | SORT integer | LIMIT 10 var query = from("index").eval("sum", b -> b.add(b.i(1), b.field("integer"))).sort("integer").limit(10); @@ -119,6 +174,17 @@ public void testSimpleSortFieldAndEvalSumLiteralAndField() { assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); } + public void testSimpleSortFieldAndEvalSumLiteralAndFieldAndScore() { + // FROM index METADATA _score | EVAL sum = 1 + integer | SORT integer, _score | LIMIT 
10 + var query = from("index").metadata("_score", DOUBLE, false) + .eval("sum", b -> b.add(b.i(1), b.field("integer"))) + .sort("integer") + .scoreSort() + .limit(10); + assertPushdownSort(query, List.of(EvalExec.class, EsQueryExec.class)); + assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); + } + public void testSimpleSortEvalSumLiteralAndField() { // FROM index | EVAL sum = 1 + integer | SORT sum | LIMIT 10 var query = from("index").eval("sum", b -> b.add(b.i(1), b.field("integer"))).sort("sum").limit(10); @@ -144,6 +210,14 @@ public void testSortGeoPointField() { assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); } + public void testSortGeoPointFieldAndScore() { + // FROM index METADATA _score | SORT location, _score | LIMIT 10 + var query = from("index").metadata("_score", DOUBLE, false).sort("location", Order.OrderDirection.ASC).scoreSort().limit(10); + // NOTE: while geo_point is not sortable, this is checked during logical planning and the physical planner does not know or care + assertPushdownSort(query); + assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); + } + public void testSortGeoDistanceFunction() { // FROM index | EVAL distance = ST_DISTANCE(location, POINT(1 2)) | SORT distance | LIMIT 10 var query = from("index").eval("distance", b -> b.distance("location", "POINT(1 2)")) @@ -154,6 +228,18 @@ public void testSortGeoDistanceFunction() { assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); } + public void testSortGeoDistanceFunctionAndScore() { + // FROM index METADATA _score | EVAL distance = ST_DISTANCE(location, POINT(1 2)) | SORT distance, _score | LIMIT 10 + var query = from("index").metadata("_score", DOUBLE, false) + .eval("distance", b -> b.distance("location", "POINT(1 2)")) + .sort("distance", Order.OrderDirection.ASC) + .scoreSort() + .limit(10); + // The pushed-down sort will use the underlying field 'location', not the sorted reference field 'distance' + assertPushdownSort(query, Map.of("distance", "location"), List.of(EvalExec.class, EsQueryExec.class)); + assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); + } + public void testSortGeoDistanceFunctionInverted() { // FROM index | EVAL distance = ST_DISTANCE(POINT(1 2), location) | SORT distance | LIMIT 10 var query = from("index").eval("distance", b -> b.distance("POINT(1 2)", "location")) @@ -164,6 +250,18 @@ public void testSortGeoDistanceFunctionInverted() { assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); } + public void testSortGeoDistanceFunctionInvertedAndScore() { + // FROM index METADATA _score | EVAL distance = ST_DISTANCE(POINT(1 2), location) | SORT distance, _score | LIMIT 10 + var query = from("index").metadata("_score", DOUBLE, false) + .eval("distance", b -> b.distance("POINT(1 2)", "location")) + .sort("distance", Order.OrderDirection.ASC) + .scoreSort() + .limit(10); + // The pushed-down sort will use the underlying field 'location', not the sorted reference field 'distance' + assertPushdownSort(query, Map.of("distance", "location"), List.of(EvalExec.class, EsQueryExec.class)); + assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); + } + public void testSortGeoDistanceFunctionLiterals() { // FROM index | EVAL distance = ST_DISTANCE(POINT(2 1), POINT(1 2)) | SORT distance | LIMIT 10 var query = from("index").eval("distance", b -> b.distance("POINT(2 1)", "POINT(1 2)")) @@ -174,6 +272,18 @@ public void
testSortGeoDistanceFunctionLiterals() { assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); } + public void testSortGeoDistanceFunctionLiteralsAndScore() { + // FROM index METADATA _score | EVAL distance = ST_DISTANCE(POINT(2 1), POINT(1 2)) | SORT distance, _score | LIMIT 10 + var query = from("index").metadata("_score", DOUBLE, false) + .eval("distance", b -> b.distance("POINT(2 1)", "POINT(1 2)")) + .sort("distance", Order.OrderDirection.ASC) + .scoreSort() + .limit(10); + // A distance between two literal points folds to a constant, so there is no underlying field to sort on and nothing is pushed down + assertNoPushdownSort(query, "sort on foldable distance function"); + assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); + } + public void testSortGeoDistanceFunctionAndFieldsWithAliases() { // FROM index | EVAL distance = ST_DISTANCE(location, POINT(1 2)), x = field | SORT distance, field, integer | LIMIT 10 var query = from("index").eval("distance", b -> b.distance("location", "POINT(1 2)")) @@ -187,6 +297,21 @@ public void testSortGeoDistanceFunctionAndFieldsWithAliases() { assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); } + public void testSortGeoDistanceFunctionAndFieldsWithAliasesAndScore() { + // FROM index | EVAL distance = ST_DISTANCE(location, POINT(1 2)), x = field | SORT distance, field, integer, _score | LIMIT 10 + var query = from("index").metadata("_score", DOUBLE, false) + .eval("distance", b -> b.distance("location", "POINT(1 2)")) + .eval("x", b -> b.field("field")) + .sort("distance", Order.OrderDirection.ASC) + .sort("field", Order.OrderDirection.DESC) + .sort("integer", Order.OrderDirection.DESC) + .scoreSort() + .limit(10); + // The pushed-down sort will use the underlying field 'location', not the sorted reference field 'distance' + assertPushdownSort(query, query.orders, Map.of("distance", "location"), List.of(EvalExec.class, EsQueryExec.class)); + assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); + } + public void testSortGeoDistanceFunctionAndFieldsAndAliases() { // FROM index | EVAL distance = ST_DISTANCE(location, POINT(1 2)), x = field | SORT distance, x, integer | LIMIT 10 var query = from("index").eval("distance", b -> b.distance("location", "POINT(1 2)")) @@ -200,6 +325,21 @@ public void testSortGeoDistanceFunctionAndFieldsAndAliases() { assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); } + public void testSortGeoDistanceFunctionAndFieldsAndAliasesAndScore() { + // FROM index | EVAL distance = ST_DISTANCE(location, POINT(1 2)), x = field | SORT distance, x, integer, _score | LIMIT 10 + var query = from("index").metadata("_score", DOUBLE, false) + .eval("distance", b -> b.distance("location", "POINT(1 2)")) + .eval("x", b -> b.field("field")) + .sort("distance", Order.OrderDirection.ASC) + .sort("x", Order.OrderDirection.DESC) + .sort("integer", Order.OrderDirection.DESC) + .scoreSort() + .limit(10); + // The pushed-down sort will use the underlying field 'location', not the sorted reference field 'distance' + assertPushdownSort(query, query.orders, Map.of("distance", "location", "x", "field"), List.of(EvalExec.class, EsQueryExec.class)); + assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); + } + public void testSortGeoDistanceFunctionAndFieldsAndManyAliases() { // FROM index // | EVAL loc = location, loc2 = loc, loc3 = loc2, distance = ST_DISTANCE(loc3, POINT(1 2)), x = field // | SORT distance, x, integer // | LIMIT 10 @@ -219,6 +359,27 @@ public void
testSortGeoDistanceFunctionAndFieldsAndManyAliases() { assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); } + public void testSortGeoDistanceFunctionAndFieldsAndManyAliasesAndScore() { + // FROM index METADATA _score + // | EVAL loc = location, loc2 = loc, loc3 = loc2, distance = ST_DISTANCE(loc3, POINT(1 2)), x = field + // | SORT distance, x, integer, _score + // | LIMIT 10 + var query = from("index").metadata("_score", DOUBLE, false) + .eval("loc", b -> b.field("location")) + .eval("loc2", b -> b.ref("loc")) + .eval("loc3", b -> b.ref("loc2")) + .eval("distance", b -> b.distance("loc3", "POINT(1 2)")) + .eval("x", b -> b.field("field")) + .sort("distance", Order.OrderDirection.ASC) + .sort("x", Order.OrderDirection.DESC) + .sort("integer", Order.OrderDirection.DESC) + .scoreSort() + .limit(10); + // The pushed-down sort will use the underlying field 'location', not the sorted reference field 'distance' + assertPushdownSort(query, Map.of("distance", "location", "x", "field"), List.of(EvalExec.class, EsQueryExec.class)); + assertNoPushdownSort(query.asTimeSeries(), "for time series index mode"); + } + private static void assertPushdownSort(TestPhysicalPlanBuilder builder) { assertPushdownSort(builder, null, List.of(EsQueryExec.class)); } @@ -289,9 +450,12 @@ private static void assertPushdownSort( assertThat("Expect sorts count to match", sorts.size(), is(expectedSorts.size())); for (int i = 0; i < expectedSorts.size(); i++) { String name = ((Attribute) expectedSorts.get(i).child()).name(); - String fieldName = sorts.get(i).field().fieldName(); - assertThat("Expect sort[" + i + "] name to match", fieldName, is(sortName(name, fieldMap))); - assertThat("Expect sort[" + i + "] direction to match", sorts.get(i).direction(), is(expectedSorts.get(i).direction())); + EsQueryExec.Sort sort = sorts.get(i); + if (sort.field() != null) { + String fieldName = sort.field().fieldName(); + assertThat("Expect sort[" + i + "] name to match", fieldName, is(sortName(name, fieldMap))); + } + assertThat("Expect sort[" + i + "] direction to match", sort.direction(), is(expectedSorts.get(i).direction())); } } @@ -317,6 +481,7 @@ static class TestPhysicalPlanBuilder { private final String index; private final LinkedHashMap fields; private final LinkedHashMap refs; + private final LinkedHashMap metadata; private IndexMode indexMode; private final List aliases = new ArrayList<>(); private final List orders = new ArrayList<>(); @@ -327,6 +492,7 @@ private TestPhysicalPlanBuilder(String index, IndexMode indexMode) { this.indexMode = indexMode; this.fields = new LinkedHashMap<>(); this.refs = new LinkedHashMap<>(); + this.metadata = new LinkedHashMap<>(); addSortableFieldAttributes(this.fields); } @@ -346,6 +512,11 @@ static TestPhysicalPlanBuilder from(String index) { return new TestPhysicalPlanBuilder(index, IndexMode.STANDARD); } + TestPhysicalPlanBuilder metadata(String metadataAttribute, DataType dataType, boolean searchable) { + metadata.put(metadataAttribute, new MetadataAttribute(Source.EMPTY, metadataAttribute, dataType, searchable)); + return this; + } + public TestPhysicalPlanBuilder eval(Alias... 
aliases) { if (orders.isEmpty() == false) { throw new IllegalArgumentException("Eval must be before sort"); @@ -376,6 +547,22 @@ public TestPhysicalPlanBuilder sort(String field) { return sort(field, Order.OrderDirection.ASC); } + public TestPhysicalPlanBuilder scoreSort(Order.OrderDirection direction) { + orders.add( + new Order( + Source.EMPTY, + MetadataAttribute.create(Source.EMPTY, MetadataAttribute.SCORE), + direction, + Order.NullsPosition.LAST + ) + ); + return this; + } + + public TestPhysicalPlanBuilder scoreSort() { + return scoreSort(Order.OrderDirection.DESC); + } + public TestPhysicalPlanBuilder sort(String field, Order.OrderDirection direction) { Attribute attr = refs.get(field); if (attr == null) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java index 67b4dd71260aa..710637c05a900 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java @@ -134,7 +134,7 @@ public void testStringLiteralsExceptions() { ); var number = "1" + IntStream.range(0, 309).mapToObj(ignored -> "0").collect(Collectors.joining()); - assertParsingException(() -> parse("row foo == " + number), "line 1:13: Number [" + number + "] is too large"); + assertParsingException(() -> parse("row foo == " + number), "line 1:12: Number [" + number + "] is too large"); } public void testBooleanLiteralsCondition() { @@ -431,7 +431,7 @@ public void testDatePeriodLiterals() { } public void testUnknownNumericQualifier() { - assertParsingException(() -> whereExpression("1 decade"), "Unexpected time interval qualifier: 'decade'"); + assertParsingException(() -> whereExpression("1 decade"), "Unexpected temporal unit: 'decade'"); } public void testQualifiedDecimalLiteral() { @@ -442,20 +442,20 @@ public void testOverflowingValueForDuration() { for (String unit : List.of("milliseconds", "seconds", "minutes", "hours")) { assertParsingException( () -> parse("row x = 9223372036854775808 " + unit), // unsigned_long (Long.MAX_VALUE + 1) - "line 1:10: Number [9223372036854775808] outside of [" + unit + "] range" + "line 1:9: Number [9223372036854775808] outside of [" + unit + "] range" ); assertParsingException( () -> parse("row x = 18446744073709551616 " + unit), // double (UNSIGNED_LONG_MAX + 1) - "line 1:10: Number [18446744073709551616] outside of [" + unit + "] range" + "line 1:9: Number [18446744073709551616] outside of [" + unit + "] range" ); } assertParsingException( () -> parse("row x = 153722867280912931 minutes"), // Long.MAX_VALUE / 60 + 1 - "line 1:10: Number [153722867280912931] outside of [minutes] range" + "line 1:9: Number [153722867280912931] outside of [minutes] range" ); assertParsingException( () -> parse("row x = 2562047788015216 hours"), // Long.MAX_VALUE / 3600 + 1 - "line 1:10: Number [2562047788015216] outside of [hours] range" + "line 1:9: Number [2562047788015216] outside of [hours] range" ); } @@ -463,12 +463,12 @@ public void testOverflowingValueForPeriod() { for (String unit : List.of("days", "weeks", "months", "years")) { assertParsingException( () -> parse("row x = 2147483648 " + unit), // long (Integer.MAX_VALUE + 1) - "line 1:10: Number [2147483648] outside of [" + unit + "] range" + "line 1:9: Number [2147483648] outside of [" + unit + "] range" ); } assertParsingException( () -> parse("row x = 306783379 weeks"), 
// Integer.MAX_VALUE / 7 + 1 - "line 1:10: Number [306783379] outside of [weeks] range" + "line 1:9: Number [306783379] outside of [weeks] range" ); } @@ -544,7 +544,7 @@ public void testWildcardProjectAwayPatterns() { } public void testForbidWildcardProjectAway() { - assertParsingException(() -> dropExpression("foo, *"), "line 1:21: Removing all fields is not allowed [*]"); + assertParsingException(() -> dropExpression("foo, *"), "line 1:20: Removing all fields is not allowed [*]"); } public void testForbidMultipleIncludeStar() { @@ -608,7 +608,7 @@ public void testMultipleProjectPatterns() { } public void testForbidWildcardProjectRename() { - assertParsingException(() -> renameExpression("b* AS a*"), "line 1:18: Using wildcards [*] in RENAME is not allowed [b* AS a*]"); + assertParsingException(() -> renameExpression("b* AS a*"), "line 1:17: Using wildcards [*] in RENAME is not allowed [b* AS a*]"); } public void testSimplifyInWithSingleElementList() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index 0f46c1f44e8d3..69c00eb395fdb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -491,25 +491,25 @@ private void clusterAndIndexAsIndexPattern(String command, String clusterAndInde public void testStringAsLookupIndexPattern() { assumeTrue("requires snapshot build", Build.current().isSnapshot()); - assertStringAsLookupIndexPattern("foo", "ROW x = 1 | LOOKUP \"foo\" ON j"); + assertStringAsLookupIndexPattern("foo", "ROW x = 1 | LOOKUP_🐔 \"foo\" ON j"); assertStringAsLookupIndexPattern("test-*", """ - ROW x = 1 | LOOKUP "test-*" ON j + ROW x = 1 | LOOKUP_🐔 "test-*" ON j """); - assertStringAsLookupIndexPattern("test-*", "ROW x = 1 | LOOKUP test-* ON j"); - assertStringAsLookupIndexPattern("123-test@foo_bar+baz1", "ROW x = 1 | LOOKUP 123-test@foo_bar+baz1 ON j"); + assertStringAsLookupIndexPattern("test-*", "ROW x = 1 | LOOKUP_🐔 test-* ON j"); + assertStringAsLookupIndexPattern("123-test@foo_bar+baz1", "ROW x = 1 | LOOKUP_🐔 123-test@foo_bar+baz1 ON j"); assertStringAsLookupIndexPattern("foo, test-*, abc, xyz", """ - ROW x = 1 | LOOKUP "foo, test-*, abc, xyz" ON j + ROW x = 1 | LOOKUP_🐔 "foo, test-*, abc, xyz" ON j """); - assertStringAsLookupIndexPattern("", "ROW x = 1 | LOOKUP ON j"); + assertStringAsLookupIndexPattern("", "ROW x = 1 | LOOKUP_🐔 ON j"); assertStringAsLookupIndexPattern( "", - "ROW x = 1 | LOOKUP \"\" ON j" + "ROW x = 1 | LOOKUP_🐔 \"\" ON j" ); - assertStringAsLookupIndexPattern("foo", "ROW x = 1 | LOOKUP \"\"\"foo\"\"\" ON j"); - assertStringAsLookupIndexPattern("`backtick`", "ROW x = 1 | LOOKUP `backtick` ON j"); - assertStringAsLookupIndexPattern("``multiple`back``ticks```", "ROW x = 1 | LOOKUP ``multiple`back``ticks``` ON j"); - assertStringAsLookupIndexPattern(".dot", "ROW x = 1 | LOOKUP .dot ON j"); + assertStringAsLookupIndexPattern("foo", "ROW x = 1 | LOOKUP_🐔 \"\"\"foo\"\"\" ON j"); + assertStringAsLookupIndexPattern("`backtick`", "ROW x = 1 | LOOKUP_🐔 `backtick` ON j"); + assertStringAsLookupIndexPattern("``multiple`back``ticks```", "ROW x = 1 | LOOKUP_🐔 ``multiple`back``ticks``` ON j"); + assertStringAsLookupIndexPattern(".dot", "ROW x = 1 | LOOKUP_🐔 .dot ON j"); clusterAndIndexAsLookupIndexPattern("cluster:index"); 
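// (The in-development LOOKUP command appears renamed to LOOKUP_🐔 while it remains snapshot-only; the @AwaitsFix markers elsewhere in this patch flag the same unfinished functionality, so the rename presumably keeps the syntax from being mistaken for a released feature.)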
clusterAndIndexAsLookupIndexPattern("cluster:.index"); clusterAndIndexAsLookupIndexPattern("cluster*:index*"); @@ -519,16 +519,16 @@ public void testStringAsLookupIndexPattern() { } private void clusterAndIndexAsLookupIndexPattern(String clusterAndIndex) { - assertStringAsLookupIndexPattern(clusterAndIndex, "ROW x = 1 | LOOKUP " + clusterAndIndex + " ON j"); - assertStringAsLookupIndexPattern(clusterAndIndex, "ROW x = 1 | LOOKUP \"" + clusterAndIndex + "\"" + " ON j"); + assertStringAsLookupIndexPattern(clusterAndIndex, "ROW x = 1 | LOOKUP_🐔 " + clusterAndIndex + " ON j"); + assertStringAsLookupIndexPattern(clusterAndIndex, "ROW x = 1 | LOOKUP_🐔 \"" + clusterAndIndex + "\"" + " ON j"); } public void testInvalidCharacterInIndexPattern() { Map commands = new HashMap<>(); - commands.put("FROM {}", "line 1:7: "); + commands.put("FROM {}", "line 1:6: "); if (Build.current().isSnapshot()) { - commands.put("METRICS {}", "line 1:10: "); - commands.put("ROW x = 1 | LOOKUP {} ON j", "line 1:21: "); + commands.put("METRICS {}", "line 1:9: "); + commands.put("ROW x = 1 | LOOKUP_🐔 {} ON j", "line 1:22: "); } String lineNumber; for (String command : commands.keySet()) { @@ -568,11 +568,11 @@ public void testInvalidCharacterInIndexPattern() { // comma separated indices, with exclusions // Invalid index names after removing exclusion fail, when there is no index name with wildcard before it for (String command : commands.keySet()) { - if (command.contains("LOOKUP")) { + if (command.contains("LOOKUP_🐔")) { continue; } - lineNumber = command.contains("FROM") ? "line 1:21: " : "line 1:24: "; + lineNumber = command.contains("FROM") ? "line 1:20: " : "line 1:23: "; expectInvalidIndexNameErrorWithLineNumber(command, "indexpattern, --indexpattern", lineNumber, "-indexpattern"); expectInvalidIndexNameErrorWithLineNumber(command, "indexpattern, \"--indexpattern\"", lineNumber, "-indexpattern"); expectInvalidIndexNameErrorWithLineNumber(command, "\"indexpattern, --indexpattern\"", commands.get(command), "-indexpattern"); @@ -582,10 +582,10 @@ public void testInvalidCharacterInIndexPattern() { // Invalid index names, except invalid DateMath, are ignored if there is an index name with wildcard before it String dateMathError = "unit [D] not supported for date math [/D]"; for (String command : commands.keySet()) { - if (command.contains("LOOKUP")) { + if (command.contains("LOOKUP_🐔")) { continue; } - lineNumber = command.contains("FROM") ? "line 1:10: " : "line 1:13: "; + lineNumber = command.contains("FROM") ? 
"line 1:9: " : "line 1:12: "; clustersAndIndices(command, "*", "-index#pattern"); clustersAndIndices(command, "index*", "-index#pattern"); clustersAndIndices(command, "*", "-<--logstash-{now/M{yyyy.MM}}>"); @@ -646,17 +646,17 @@ public void testInvalidQuotingAsMetricsIndexPattern() { public void testInvalidQuotingAsLookupIndexPattern() { assumeTrue("requires snapshot builds", Build.current().isSnapshot()); - expectError("ROW x = 1 | LOOKUP \"foo ON j", ": token recognition error at: '\"foo ON j'"); - expectError("ROW x = 1 | LOOKUP \"\"\"foo ON j", ": token recognition error at: '\"foo ON j'"); + expectError("ROW x = 1 | LOOKUP_🐔 \"foo ON j", ": token recognition error at: '\"foo ON j'"); + expectError("ROW x = 1 | LOOKUP_🐔 \"\"\"foo ON j", ": token recognition error at: '\"foo ON j'"); - expectError("ROW x = 1 | LOOKUP foo\" ON j", ": token recognition error at: '\" ON j'"); - expectError("ROW x = 1 | LOOKUP foo\"\"\" ON j", ": token recognition error at: '\" ON j'"); + expectError("ROW x = 1 | LOOKUP_🐔 foo\" ON j", ": token recognition error at: '\" ON j'"); + expectError("ROW x = 1 | LOOKUP_🐔 foo\"\"\" ON j", ": token recognition error at: '\" ON j'"); - expectError("ROW x = 1 | LOOKUP \"foo\"bar\" ON j", ": token recognition error at: '\" ON j'"); - expectError("ROW x = 1 | LOOKUP \"foo\"\"bar\" ON j", ": extraneous input '\"bar\"' expecting 'on'"); + expectError("ROW x = 1 | LOOKUP_🐔 \"foo\"bar\" ON j", ": token recognition error at: '\" ON j'"); + expectError("ROW x = 1 | LOOKUP_🐔 \"foo\"\"bar\" ON j", ": extraneous input '\"bar\"' expecting 'on'"); - expectError("ROW x = 1 | LOOKUP \"\"\"foo\"\"\"bar\"\"\" ON j", ": mismatched input 'bar' expecting 'on'"); - expectError("ROW x = 1 | LOOKUP \"\"\"foo\"\"\"\"\"\"bar\"\"\" ON j", "line 1:31: mismatched input '\"bar\"' expecting 'on'"); + expectError("ROW x = 1 | LOOKUP_🐔 \"\"\"foo\"\"\"bar\"\"\" ON j", ": mismatched input 'bar' expecting 'on'"); + expectError("ROW x = 1 | LOOKUP_🐔 \"\"\"foo\"\"\"\"\"\"bar\"\"\" ON j", ": mismatched input '\"bar\"' expecting 'on'"); } public void testIdentifierAsFieldName() { @@ -885,18 +885,18 @@ public void testSuggestAvailableProcessingCommandsOnParsingError() { public void testDeprecatedIsNullFunction() { expectError( "from test | eval x = is_null(f)", - "line 1:23: is_null function is not supported anymore, please use 'is null'/'is not null' predicates instead" + "line 1:22: is_null function is not supported anymore, please use 'is null'/'is not null' predicates instead" ); expectError( "row x = is_null(f)", - "line 1:10: is_null function is not supported anymore, please use 'is null'/'is not null' predicates instead" + "line 1:9: is_null function is not supported anymore, please use 'is null'/'is not null' predicates instead" ); if (Build.current().isSnapshot()) { expectError( "from test | eval x = ?fn1(f)", List.of(paramAsIdentifier("fn1", "IS_NULL")), - "line 1:23: is_null function is not supported anymore, please use 'is null'/'is not null' predicates instead" + "line 1:22: is_null function is not supported anymore, please use 'is null'/'is not null' predicates instead" ); } } @@ -911,23 +911,23 @@ public void testMetadataFieldOnOtherSources() { } public void testMetadataFieldMultipleDeclarations() { - expectError("from test metadata _index, _version, _index", "1:39: metadata field [_index] already declared [@1:20]"); + expectError("from test metadata _index, _version, _index", "1:38: metadata field [_index] already declared [@1:20]"); } public void testMetadataFieldUnsupportedPrimitiveType() { 
- expectError("from test metadata _tier", "line 1:21: unsupported metadata field [_tier]"); + expectError("from test metadata _tier", "line 1:20: unsupported metadata field [_tier]"); } public void testMetadataFieldUnsupportedCustomType() { - expectError("from test metadata _feature", "line 1:21: unsupported metadata field [_feature]"); + expectError("from test metadata _feature", "line 1:20: unsupported metadata field [_feature]"); } public void testMetadataFieldNotFoundNonExistent() { - expectError("from test metadata _doesnot_compute", "line 1:21: unsupported metadata field [_doesnot_compute]"); + expectError("from test metadata _doesnot_compute", "line 1:20: unsupported metadata field [_doesnot_compute]"); } public void testMetadataFieldNotFoundNormalField() { - expectError("from test metadata emp_no", "line 1:21: unsupported metadata field [emp_no]"); + expectError("from test metadata emp_no", "line 1:20: unsupported metadata field [emp_no]"); } public void testDissectPattern() { @@ -985,13 +985,13 @@ public void testGrokPattern() { expectError( "row a = \"foo bar\" | GROK a \"%{NUMBER:foo} %{WORD:foo}\"", - "line 1:22: Invalid GROK pattern [%{NUMBER:foo} %{WORD:foo}]:" + "line 1:21: Invalid GROK pattern [%{NUMBER:foo} %{WORD:foo}]:" + " the attribute [foo] is defined multiple times with different types" ); expectError( "row a = \"foo\" | GROK a \"(?P.+)\"", - "line 1:18: Invalid grok pattern [(?P.+)]: [undefined group option]" + "line 1:17: Invalid grok pattern [(?P.+)]: [undefined group option]" ); } @@ -1015,7 +1015,7 @@ public void testLikeRLike() { expectError( "from a | where foo like \"(?i)(^|[^a-zA-Z0-9_-])nmap($|\\\\.)\"", - "line 1:17: Invalid pattern for LIKE [(?i)(^|[^a-zA-Z0-9_-])nmap($|\\.)]: " + "line 1:16: Invalid pattern for LIKE [(?i)(^|[^a-zA-Z0-9_-])nmap($|\\.)]: " + "[Invalid sequence - escape character is not followed by special wildcard char]" ); } @@ -1076,7 +1076,7 @@ public void testEnrich() { ); expectError( "from a | enrich typo:countries on foo", - "line 1:18: Unrecognized value [typo], ENRICH policy qualifier needs to be one of [_ANY, _COORDINATOR, _REMOTE]" + "line 1:17: Unrecognized value [typo], ENRICH policy qualifier needs to be one of [_ANY, _COORDINATOR, _REMOTE]" ); } @@ -1261,8 +1261,8 @@ public void testInvalidPositionalParams() { expectError( "from test | where x < ?0 and y < ?2", List.of(paramAsConstant(null, 5)), - "line 1:24: No parameter is defined for position 0, did you mean position 1?; " - + "line 1:35: No parameter is defined for position 2, did you mean position 1?" + "line 1:23: No parameter is defined for position 0, did you mean position 1?; " + + "line 1:34: No parameter is defined for position 2, did you mean position 1?" 
); expectError( @@ -2050,7 +2050,7 @@ private void assertStringAsIndexPattern(String string, String statement) { private void assertStringAsLookupIndexPattern(String string, String statement) { if (Build.current().isSnapshot() == false) { var e = expectThrows(ParsingException.class, () -> statement(statement)); - assertThat(e.getMessage(), containsString("line 1:14: LOOKUP is in preview and only available in SNAPSHOT build")); + assertThat(e.getMessage(), containsString("line 1:14: LOOKUP_🐔 is in preview and only available in SNAPSHOT build")); return; } var plan = statement(statement); @@ -2107,18 +2107,18 @@ public void testEnrichOnMatchField() { } public void testInlineConvertWithNonexistentType() { - expectError("ROW 1::doesnotexist", "line 1:9: Unknown data type named [doesnotexist]"); - expectError("ROW \"1\"::doesnotexist", "line 1:11: Unknown data type named [doesnotexist]"); - expectError("ROW false::doesnotexist", "line 1:13: Unknown data type named [doesnotexist]"); - expectError("ROW abs(1)::doesnotexist", "line 1:14: Unknown data type named [doesnotexist]"); - expectError("ROW (1+2)::doesnotexist", "line 1:13: Unknown data type named [doesnotexist]"); + expectError("ROW 1::doesnotexist", "line 1:8: Unknown data type named [doesnotexist]"); + expectError("ROW \"1\"::doesnotexist", "line 1:10: Unknown data type named [doesnotexist]"); + expectError("ROW false::doesnotexist", "line 1:12: Unknown data type named [doesnotexist]"); + expectError("ROW abs(1)::doesnotexist", "line 1:13: Unknown data type named [doesnotexist]"); + expectError("ROW (1+2)::doesnotexist", "line 1:12: Unknown data type named [doesnotexist]"); } public void testLookup() { - String query = "ROW a = 1 | LOOKUP t ON j"; + String query = "ROW a = 1 | LOOKUP_🐔 t ON j"; if (Build.current().isSnapshot() == false) { var e = expectThrows(ParsingException.class, () -> statement(query)); - assertThat(e.getMessage(), containsString("line 1:13: mismatched input 'LOOKUP' expecting {")); + assertThat(e.getMessage(), containsString("line 1:13: mismatched input 'LOOKUP_🐔' expecting {")); return; } var plan = statement(query); @@ -2131,7 +2131,7 @@ public void testLookup() { } public void testInlineConvertUnsupportedType() { - expectError("ROW 3::BYTE", "line 1:6: Unsupported conversion to type [BYTE]"); + expectError("ROW 3::BYTE", "line 1:5: Unsupported conversion to type [BYTE]"); } public void testMetricsWithoutStats() { @@ -2300,7 +2300,6 @@ public void testMetricWithGroupKeyAsAgg() { } public void testMatchOperatorConstantQueryString() { - assumeTrue("skipping because MATCH operator is not enabled", EsqlCapabilities.Cap.MATCH_OPERATOR_COLON.isEnabled()); var plan = statement("FROM test | WHERE field:\"value\""); var filter = as(plan, Filter.class); var match = (Match) filter.condition(); @@ -2310,7 +2309,6 @@ public void testMatchOperatorConstantQueryString() { } public void testInvalidMatchOperator() { - assumeTrue("skipping because MATCH operator is not enabled", EsqlCapabilities.Cap.MATCH_OPERATOR_COLON.isEnabled()); expectError("from test | WHERE field:", "line 1:25: mismatched input '' expecting {QUOTED_STRING, "); expectError( "from test | WHERE field:CONCAT(\"hello\", \"world\")", diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java index 6936c96a143d4..eea408914f4c5 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java @@ -8,13 +8,11 @@ package org.elasticsearch.xpack.esql.plan.logical; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.compute.data.BlockWritables; import org.elasticsearch.xpack.esql.core.tree.Node; -import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.esql.expression.ExpressionWritables; import org.elasticsearch.xpack.esql.plan.AbstractNodeSerializationTests; +import org.elasticsearch.xpack.esql.plan.PlanWritables; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelationSerializationTests; import java.util.ArrayList; @@ -32,12 +30,10 @@ public static LogicalPlan randomChild(int depth) { @Override protected final NamedWriteableRegistry getNamedWriteableRegistry() { List entries = new ArrayList<>(); - entries.addAll(LogicalPlan.getNamedWriteables()); - entries.addAll(AggregateFunction.getNamedWriteables()); - entries.addAll(Expression.getNamedWriteables()); - entries.addAll(Attribute.getNamedWriteables()); - entries.addAll(Block.getNamedWriteables()); - entries.addAll(NamedExpression.getNamedWriteables()); + entries.addAll(PlanWritables.logical()); + entries.addAll(ExpressionWritables.aggregates()); + entries.addAll(ExpressionWritables.allExpressions()); + entries.addAll(BlockWritables.getNamedWriteables()); return new NamedWriteableRegistry(entries); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinSerializationTests.java index 6b17e4efd4de7..7c75ea623b34f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinSerializationTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.esql.plan.logical.join.Join; import org.elasticsearch.xpack.esql.plan.logical.join.JoinConfig; import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes; import java.io.IOException; import java.util.List; @@ -27,7 +28,7 @@ protected Join createTestInstance() { } private static JoinConfig randomJoinConfig() { - JoinType type = randomFrom(JoinType.values()); + JoinType type = randomFrom(JoinTypes.LEFT, JoinTypes.RIGHT, JoinTypes.INNER, JoinTypes.FULL, JoinTypes.CROSS); List matchFields = randomFieldAttributes(1, 10, false); List leftFields = randomFieldAttributes(1, 10, false); List rightFields = randomFieldAttributes(1, 10, false); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinTests.java index dde70d85ba259..13887fbd1740c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/JoinTests.java @@ 
-17,7 +17,7 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.plan.logical.join.Join; import org.elasticsearch.xpack.esql.plan.logical.join.JoinConfig; -import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes; import java.util.ArrayList; import java.util.List; @@ -48,7 +48,7 @@ public void testExpressionsAndReferences() { Row left = new Row(Source.EMPTY, leftFields); Row right = new Row(Source.EMPTY, rightFields); - JoinConfig joinConfig = new JoinConfig(JoinType.LEFT, matchFields, leftAttributes, rightAttributes); + JoinConfig joinConfig = new JoinConfig(JoinTypes.LEFT, matchFields, leftAttributes, rightAttributes); Join join = new Join(Source.EMPTY, left, right, joinConfig); // matchfields are a subset of the left and right fields, so they don't contribute to the size of the references set. @@ -88,7 +88,7 @@ public void testTransformExprs() { Row left = new Row(Source.EMPTY, leftFields); Row right = new Row(Source.EMPTY, rightFields); - JoinConfig joinConfig = new JoinConfig(JoinType.LEFT, matchFields, leftAttributes, rightAttributes); + JoinConfig joinConfig = new JoinConfig(JoinTypes.LEFT, matchFields, leftAttributes, rightAttributes); Join join = new Join(Source.EMPTY, left, right, joinConfig); assertTrue(join.config().matchFields().stream().allMatch(ref -> ref.dataType().equals(DataType.INTEGER))); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalSupplierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalSupplierTests.java index ccb27b41f2ed6..1f56150977d99 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalSupplierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/local/LocalSupplierTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockWritables; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.test.AbstractWireTestCase; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; @@ -81,6 +82,6 @@ protected boolean shouldBeSame(LocalSupplier newInstance) { @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { - return new NamedWriteableRegistry(Block.getNamedWriteables()); + return new NamedWriteableRegistry(BlockWritables.getNamedWriteables()); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AbstractPhysicalPlanSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AbstractPhysicalPlanSerializationTests.java index 4b74114a0e01c..7689b80515880 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AbstractPhysicalPlanSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AbstractPhysicalPlanSerializationTests.java @@ -9,16 +9,13 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockWritables; import org.elasticsearch.search.SearchModule; -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import 
org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Node; -import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.esql.expression.ExpressionWritables; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; import org.elasticsearch.xpack.esql.plan.AbstractNodeSerializationTests; -import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.PlanWritables; import java.util.ArrayList; import java.util.List; @@ -50,13 +47,10 @@ public static Integer randomEstimatedRowSize() { @Override protected final NamedWriteableRegistry getNamedWriteableRegistry() { List entries = new ArrayList<>(); - entries.addAll(PhysicalPlan.getNamedWriteables()); - entries.addAll(LogicalPlan.getNamedWriteables()); - entries.addAll(AggregateFunction.getNamedWriteables()); - entries.addAll(Expression.getNamedWriteables()); - entries.addAll(Attribute.getNamedWriteables()); - entries.addAll(Block.getNamedWriteables()); - entries.addAll(NamedExpression.getNamedWriteables()); + entries.addAll(PlanWritables.getNamedWriteables()); + entries.addAll(ExpressionWritables.aggregates()); + entries.addAll(ExpressionWritables.allExpressions()); + entries.addAll(BlockWritables.getNamedWriteables()); entries.addAll(new SearchModule(Settings.EMPTY, List.of()).getNamedWriteables()); // Query builders entries.add(Add.ENTRY); // Used by the eval tests return new NamedWriteableRegistry(entries); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java index 5989c0de6b61d..f8e12cd4f5ba9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java @@ -66,12 +66,13 @@ protected boolean alwaysEmptySource() { * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. */ public void testManyTypeConflicts() throws IOException { - testManyTypeConflicts(false, ByteSizeValue.ofBytes(1424048)); + testManyTypeConflicts(false, ByteSizeValue.ofBytes(1424046L)); /* * History: * 2.3mb - shorten error messages for UnsupportedAttributes #111973 * 1.8mb - cache EsFields #112008 * 1.4mb - string serialization #112929 + * 1424046b - remove node-level plan #117422 */ } @@ -80,7 +81,7 @@ public void testManyTypeConflicts() throws IOException { * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. */ public void testManyTypeConflictsWithParent() throws IOException { - testManyTypeConflicts(true, ByteSizeValue.ofBytes(2774192)); + testManyTypeConflicts(true, ByteSizeValue.ofBytes(2774190)); /* * History: * 2 gb+ - start @@ -89,6 +90,7 @@ public void testManyTypeConflictsWithParent() throws IOException { * 3.1mb - cache EsFields #112008 * 2774214b - string serialization #112929 * 2774192b - remove field attribute #112881 + * 2774190b - remove node-level plan #117422 */ } @@ -103,11 +105,12 @@ private void testManyTypeConflicts(boolean withParent, ByteSizeValue expected) t * with a single root field that has many children, grandchildren etc. 
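The byte-count assertions above act as serialization-size regression tests: each constant pins the current wire size of a deliberately huge plan, and the adjacent history comment records every PR that moved the number (here #117422, the node-level plan removal, shaves two bytes). A sketch of the underlying check, assuming the helper serializes through a PlanStreamOutput wrapping a BytesStreamOutput; the real test helper may be wired differently:

```java
// Illustrative size pin: serialize the plan, measure the bytes, and compare
// against the recorded constant so any accidental size regression fails loudly
// and forces the author to update the constant plus its history comment.
void assertSerializedSize(PhysicalPlan plan, Configuration config, ByteSizeValue expected) throws IOException {
    try (BytesStreamOutput out = new BytesStreamOutput()) {
        new PlanStreamOutput(out, config).writeNamedWriteable(plan);
        assertThat(ByteSizeValue.ofBytes(out.bytes().length()), equalTo(expected));
    }
}
```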
*/ public void testDeeplyNestedFields() throws IOException { - ByteSizeValue expected = ByteSizeValue.ofBytes(47252411); + ByteSizeValue expected = ByteSizeValue.ofBytes(47252409); /* * History: * 48223371b - string serialization #112929 * 47252411b - remove field attribute #112881 + * 47252409b - remove node-level plan */ int depth = 6; @@ -123,11 +126,12 @@ public void testDeeplyNestedFields() throws IOException { * with a single root field that has many children, grandchildren etc. */ public void testDeeplyNestedFieldsKeepOnlyOne() throws IOException { - ByteSizeValue expected = ByteSizeValue.ofBytes(9425806); + ByteSizeValue expected = ByteSizeValue.ofBytes(9425804); /* * History: * 9426058b - string serialization #112929 * 9425806b - remove field attribute #112881 + * 9425804b - remove node-level plan #117422 */ int depth = 6; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExecSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExecSerializationTests.java index 3c70290360a56..b36c42a1a06ab 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExecSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/FragmentExecSerializationTests.java @@ -22,8 +22,7 @@ public static FragmentExec randomFragmentExec(int depth) { LogicalPlan fragment = AbstractLogicalPlanSerializationTests.randomChild(depth); QueryBuilder esFilter = EsqlQueryRequestTests.randomQueryBuilder(); int estimatedRowSize = between(0, Integer.MAX_VALUE); - PhysicalPlan reducer = randomChild(depth); - return new FragmentExec(source, fragment, esFilter, estimatedRowSize, reducer); + return new FragmentExec(source, fragment, esFilter, estimatedRowSize); } @Override @@ -36,15 +35,13 @@ protected FragmentExec mutateInstance(FragmentExec instance) throws IOException LogicalPlan fragment = instance.fragment(); QueryBuilder esFilter = instance.esFilter(); int estimatedRowSize = instance.estimatedRowSize(); - PhysicalPlan reducer = instance.reducer(); - switch (between(0, 3)) { + switch (between(0, 2)) { case 0 -> fragment = randomValueOtherThan(fragment, () -> AbstractLogicalPlanSerializationTests.randomChild(0)); case 1 -> esFilter = randomValueOtherThan(esFilter, EsqlQueryRequestTests::randomQueryBuilder); case 2 -> estimatedRowSize = randomValueOtherThan(estimatedRowSize, () -> between(0, Integer.MAX_VALUE)); - case 3 -> reducer = randomValueOtherThan(reducer, () -> randomChild(0)); default -> throw new UnsupportedEncodingException(); } - return new FragmentExec(instance.source(), fragment, esFilter, estimatedRowSize, reducer); + return new FragmentExec(instance.source(), fragment, esFilter, estimatedRowSize); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/RowExecSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/RowExecSerializationTests.java deleted file mode 100644 index 3dd44cd20e369..0000000000000 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/RowExecSerializationTests.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
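With the reducer gone, FragmentExec is down to four components and mutateInstance loses a branch (between(0, 3) becomes between(0, 2)). The contract these serialization tests rely on: change exactly one randomly chosen component, guaranteed different via randomValueOtherThan, so an equality-based round trip catches any field the wire format forgets. In miniature:

```java
// One-component-at-a-time mutation: after this PR there are only three mutable
// pieces (fragment, esFilter, estimatedRowSize), hence three switch branches.
FragmentExec mutate(FragmentExec instance) {
    LogicalPlan fragment = instance.fragment();
    QueryBuilder esFilter = instance.esFilter();
    int estimatedRowSize = instance.estimatedRowSize();
    switch (between(0, 2)) {
        case 0 -> fragment = randomValueOtherThan(fragment, () -> AbstractLogicalPlanSerializationTests.randomChild(0));
        case 1 -> esFilter = randomValueOtherThan(esFilter, EsqlQueryRequestTests::randomQueryBuilder);
        case 2 -> estimatedRowSize = randomValueOtherThan(estimatedRowSize, () -> between(0, Integer.MAX_VALUE));
        default -> throw new AssertionError("unreachable");
    }
    return new FragmentExec(instance.source(), fragment, esFilter, estimatedRowSize);
}
```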
- */ - -package org.elasticsearch.xpack.esql.plan.physical; - -import org.elasticsearch.xpack.esql.core.expression.Alias; -import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.expression.LiteralTests; -import org.elasticsearch.xpack.esql.core.expression.NameId; -import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.core.tree.SourceTests; - -import java.io.IOException; -import java.util.List; - -public class RowExecSerializationTests extends AbstractPhysicalPlanSerializationTests { - public static RowExec randomRowExec() { - Source source = randomSource(); - List fields = randomList(1, 10, RowExecSerializationTests::randomAlias); - return new RowExec(source, fields); - } - - private static Alias randomAlias() { - Source source = SourceTests.randomSource(); - String name = randomAlphaOfLength(5); - Expression child = LiteralTests.randomLiteral(); - boolean synthetic = randomBoolean(); - return new Alias(source, name, child, new NameId(), synthetic); - } - - @Override - protected RowExec createTestInstance() { - return randomRowExec(); - } - - @Override - protected RowExec mutateInstance(RowExec instance) throws IOException { - List fields = instance.fields(); - fields = randomValueOtherThan(fields, () -> randomList(1, 10, RowExecSerializationTests::randomAlias)); - return new RowExec(instance.source(), fields); - } - - @Override - protected boolean alwaysEmptySource() { - return true; - } -} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java index 8d819f9dbcd6c..55f32d07fc2cb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java @@ -305,7 +305,7 @@ private PhysicalPlan plan(String query, QueryBuilder restFilter) { // System.out.println("physical\n" + physical); physical = physical.transformUp( FragmentExec.class, - f -> new FragmentExec(f.source(), f.fragment(), restFilter, f.estimatedRowSize(), f.reducer()) + f -> new FragmentExec(f.source(), f.fragment(), restFilter, f.estimatedRowSize()) ); physical = physicalPlanOptimizer.optimize(physical); // System.out.println("optimized\n" + physical); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java index f60e5384e1a6f..5d8da21c6faad 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java @@ -146,6 +146,7 @@ private LocalExecutionPlanner planner() throws IOException { null, null, null, + null, esPhysicalOperationProviders() ); } @@ -155,7 +156,7 @@ private Configuration config() { randomZone(), randomLocale(random()), "test_user", - "test_cluser", + "test_cluster", pragmas, EsqlPlugin.QUERY_RESULT_TRUNCATION_MAX_SIZE.getDefault(null), EsqlPlugin.QUERY_RESULT_TRUNCATION_DEFAULT_SIZE.getDefault(null), @@ -186,7 +187,7 @@ private EsPhysicalOperationProviders esPhysicalOperationProviders() throws IOExc ); } releasables.add(searcher); - return new EsPhysicalOperationProviders(shardContexts); + return new EsPhysicalOperationProviders(shardContexts, null); } private 
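FilterTests (below in this hunk) splices a REST-level filter into an already-built physical plan rather than re-planning from scratch; the only change in this PR is the narrower FragmentExec copy constructor. The rewrite idiom, isolated:

```java
// Rewrite every FragmentExec in the tree, attaching the REST filter.
// transformUp visits children before parents and rebuilds only the nodes the
// lambda actually changes, leaving the rest of the plan shared.
PhysicalPlan withRestFilter(PhysicalPlan physical, QueryBuilder restFilter) {
    return physical.transformUp(
        FragmentExec.class,
        f -> new FragmentExec(f.source(), f.fragment(), restFilter, f.estimatedRowSize())
    );
}
```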
IndexReader reader() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/QueryTranslatorTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/QueryTranslatorTests.java index cf90cf96fe683..57210fda07f2b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/QueryTranslatorTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/QueryTranslatorTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.planner; import org.elasticsearch.index.IndexMode; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.analysis.Analyzer; @@ -46,7 +47,7 @@ private static Analyzer makeAnalyzer(String mappingFileName) { return new Analyzer( new AnalyzerContext(EsqlTestUtils.TEST_CFG, new EsqlFunctionRegistry(), getIndexResult, new EnrichResolution()), - new Verifier(new Metrics(new EsqlFunctionRegistry())) + new Verifier(new Metrics(new EsqlFunctionRegistry()), new XPackLicenseState(() -> 0L)) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java index c811643c8daea..e91fc6e49312d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java @@ -8,7 +8,9 @@ package org.elasticsearch.xpack.esql.planner; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.Describable; import org.elasticsearch.compute.aggregation.GroupingAggregator; @@ -28,7 +30,11 @@ import org.elasticsearch.compute.operator.OrdinalsGroupingOperator; import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.compute.operator.SourceOperator.SourceOperatorFactory; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.plugins.scanners.StablePluginsRegistry; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.core.expression.Attribute; @@ -39,7 +45,9 @@ import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.LocalExecutionPlannerContext; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.PhysicalOperation; +import org.elasticsearch.xpack.ml.MachineLearning; +import java.io.IOException; import java.util.List; import java.util.Random; import java.util.function.Function; @@ -48,6 +56,7 @@ import static com.carrotsearch.randomizedtesting.generators.RandomNumbers.randomIntBetween; import static java.util.stream.Collectors.joining; +import static org.apache.lucene.tests.util.LuceneTestCase.createTempDir; import static org.elasticsearch.index.mapper.MappedFieldType.FieldExtractPreference.DOC_VALUES; import static 
org.elasticsearch.index.mapper.MappedFieldType.FieldExtractPreference.NONE; @@ -56,7 +65,16 @@ public class TestPhysicalOperationProviders extends AbstractPhysicalOperationPro private final Page testData; private final List columnNames; - public TestPhysicalOperationProviders(Page testData, List columnNames) { + public TestPhysicalOperationProviders(Page testData, List columnNames) throws IOException { + super( + new AnalysisModule( + TestEnvironment.newEnvironment( + Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build() + ), + List.of(new MachineLearning(Settings.EMPTY), new CommonAnalysisPlugin()), + new StablePluginsRegistry() + ).getAnalysisRegistry() + ); this.testData = testData; this.columnNames = columnNames; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java index 625cb5628d039..b606f99df437c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java @@ -353,10 +353,7 @@ public void testAcquireComputeRunningOnRemoteClusterFillsInTookTime() { assertThat(response.getTook().millis(), greaterThanOrEqualTo(0L)); assertThat(executionInfo.getCluster(remoteAlias).getTook().millis(), greaterThanOrEqualTo(0L)); assertThat(executionInfo.getCluster(remoteAlias).getTook(), equalTo(response.getTook())); - - // the status in the (remote) executionInfo will still be RUNNING, since the SUCCESSFUL status gets set on the querying - // cluster executionInfo in the acquireCompute CCS listener, NOT present in this test - see testCollectComputeResultsInCCSListener - assertThat(executionInfo.getCluster(remoteAlias).getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.RUNNING)); + assertThat(executionInfo.getCluster(remoteAlias).getStatus(), equalTo(EsqlExecutionInfo.Cluster.Status.SUCCESSFUL)); Mockito.verifyNoInteractions(transportService.getTaskManager()); } @@ -376,6 +373,17 @@ public void testAcquireComputeRunningOnQueryingClusterFillsInTookTime() { // fully filled in for cross-cluster searches executionInfo.swapCluster(localCluster, (k, v) -> new EsqlExecutionInfo.Cluster(localCluster, "logs*", false)); executionInfo.swapCluster("my_remote", (k, v) -> new EsqlExecutionInfo.Cluster("my_remote", "my_remote:logs*", false)); + + // before acquire-compute, can-match (SearchShards) runs filling in total shards and skipped shards, so simulate that here + executionInfo.swapCluster( + localCluster, + (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setTotalShards(10).setSkippedShards(1).build() + ); + executionInfo.swapCluster( + "my_remote", + (k, v) -> new EsqlExecutionInfo.Cluster.Builder(v).setTotalShards(10).setSkippedShards(1).build() + ); + try ( ComputeListener computeListener = ComputeListener.create( // whereRunning=localCluster simulates running on the querying cluster diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/KqlQueryTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/KqlQueryTests.java new file mode 100644 index 0000000000000..8dfb50f84ac1e --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/KqlQueryTests.java @@ -0,0 +1,139 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
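TestPhysicalOperationProviders now has to hand its superclass an AnalysisRegistry, which it builds from a throwaway AnalysisModule as shown in the hunk above. The same construction, condensed into a helper; it assumes (as the diff does) that only a home path is required in settings:

```java
import java.io.IOException;
import java.util.List;

import org.elasticsearch.analysis.common.CommonAnalysisPlugin;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.TestEnvironment;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.plugins.scanners.StablePluginsRegistry;
import org.elasticsearch.xpack.ml.MachineLearning;

// Build a minimal AnalysisRegistry for tests: a temp home dir, the analysis
// plugins whose analyzers the tests exercise, and no stable plugins.
static AnalysisRegistry testAnalysisRegistry() throws IOException {
    Settings settings = Settings.builder()
        .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
        .build();
    return new AnalysisModule(
        TestEnvironment.newEnvironment(settings),
        List.of(new MachineLearning(Settings.EMPTY), new CommonAnalysisPlugin()),
        new StablePluginsRegistry()
    ).getAnalysisRegistry();
}
```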
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.esql.querydsl.query; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.tree.SourceTests; +import org.elasticsearch.xpack.esql.core.util.StringUtils; +import org.elasticsearch.xpack.kql.query.KqlQueryBuilder; + +import java.time.ZoneId; +import java.time.zone.ZoneRulesException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.hamcrest.Matchers.equalTo; + +public class KqlQueryTests extends ESTestCase { + static KqlQuery randomKqlQuery() { + Map<String, String> options = new HashMap<>(); + + if (randomBoolean()) { + options.put(KqlQueryBuilder.CASE_INSENSITIVE_FIELD.getPreferredName(), String.valueOf(randomBoolean())); + } + + if (randomBoolean()) { + options.put(KqlQueryBuilder.DEFAULT_FIELD_FIELD.getPreferredName(), randomIdentifier()); + } + + if (randomBoolean()) { + options.put(KqlQueryBuilder.TIME_ZONE_FIELD.getPreferredName(), randomZone().getId()); + } + + return new KqlQuery(SourceTests.randomSource(), randomAlphaOfLength(5), Collections.unmodifiableMap(options)); + } + + public void testEqualsAndHashCode() { + for (int runs = 0; runs < 100; runs++) { + checkEqualsAndHashCode(randomKqlQuery(), KqlQueryTests::copy, KqlQueryTests::mutate); + } + } + + private static KqlQuery copy(KqlQuery query) { + return new KqlQuery(query.source(), query.query(), query.options()); + } + + private static KqlQuery mutate(KqlQuery query) { + List<Function<KqlQuery, KqlQuery>> options = Arrays.asList( + q -> new KqlQuery(SourceTests.mutate(q.source()), q.query(), q.options()), + q -> new KqlQuery(q.source(), randomValueOtherThan(q.query(), () -> randomAlphaOfLength(5)), q.options()), + q -> new KqlQuery(q.source(), q.query(), mutateOptions(q.options())) + ); + + return randomFrom(options).apply(query); + } + + private static Map<String, String> mutateOptions(Map<String, String> options) { + Map<String, String> mutatedOptions = new HashMap<>(options); + if (options.isEmpty() == false && randomBoolean()) { + mutatedOptions = options.entrySet() + .stream() + .filter(entry -> randomBoolean()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } + + while (mutatedOptions.equals(options)) { + if (randomBoolean()) { + mutatedOptions = mutateOption( + mutatedOptions, + KqlQueryBuilder.CASE_INSENSITIVE_FIELD.getPreferredName(), + () -> String.valueOf(randomBoolean()) + ); + } + + if (randomBoolean()) { + mutatedOptions = mutateOption( + mutatedOptions, + KqlQueryBuilder.DEFAULT_FIELD_FIELD.getPreferredName(), + () -> randomIdentifier() + ); + } + + if (randomBoolean()) { + mutatedOptions = mutateOption( + mutatedOptions, + KqlQueryBuilder.TIME_ZONE_FIELD.getPreferredName(), + () -> randomZone().getId() + ); + } + } + + return Collections.unmodifiableMap(mutatedOptions); + } + + private static Map<String, String> mutateOption(Map<String, String> options, String optionName, Supplier<String> valueSupplier) { + options = new HashMap<>(options); + options.put(optionName, randomValueOtherThan(options.get(optionName), valueSupplier)); + return options; + } + + public void testQueryBuilding() { + KqlQueryBuilder qb =
getBuilder(Map.of("case_insensitive", "false")); + assertThat(qb.caseInsensitive(), equalTo(false)); + + qb = getBuilder(Map.of("case_insensitive", "false", "time_zone", "UTC", "default_field", "foo")); + assertThat(qb.caseInsensitive(), equalTo(false)); + assertThat(qb.timeZone(), equalTo(ZoneId.of("UTC"))); + assertThat(qb.defaultField(), equalTo("foo")); + + Exception e = expectThrows(IllegalArgumentException.class, () -> getBuilder(Map.of("pizza", "yummy"))); + assertThat(e.getMessage(), equalTo("illegal kql query option [pizza]")); + + e = expectThrows(ZoneRulesException.class, () -> getBuilder(Map.of("time_zone", "aoeu"))); + assertThat(e.getMessage(), equalTo("Unknown time-zone ID: aoeu")); + } + + private static KqlQueryBuilder getBuilder(Map options) { + final Source source = new Source(1, 1, StringUtils.EMPTY); + final KqlQuery kqlQuery = new KqlQuery(source, "eggplant", options); + return (KqlQueryBuilder) kqlQuery.asBuilder(); + } + + public void testToString() { + final Source source = new Source(1, 1, StringUtils.EMPTY); + final KqlQuery kqlQuery = new KqlQuery(source, "eggplant", Map.of()); + assertEquals("KqlQuery@1:2[eggplant]", kqlQuery.toString()); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/ConfigurationSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/ConfigurationSerializationTests.java index 1f35bb5312b20..b010616cd7cf7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/ConfigurationSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/ConfigurationSerializationTests.java @@ -14,9 +14,9 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockStreamInput; +import org.elasticsearch.compute.data.BlockWritables; import org.elasticsearch.core.Releasables; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.esql.Column; @@ -110,6 +110,6 @@ protected Configuration mutateInstance(Configuration in) { @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { - return new NamedWriteableRegistry(Block.getNamedWriteables()); + return new NamedWriteableRegistry(BlockWritables.getNamedWriteables()); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java index e60024ecd5db4..60b632c443f8e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.transport.NoSuchRemoteClusterException; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.index.EsIndex; @@ -228,7 +229,8 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { IndexMode.STANDARD ) ); - IndexResolution 
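testQueryBuilding above pins the observable behavior of KqlQuery.asBuilder(): known options land on the matching KqlQueryBuilder setters, unknown options throw. A hypothetical sketch of that dispatch; the real implementation is not part of this diff, and the setter names are assumed from the getters the test calls:

```java
// Hypothetical option dispatch behind KqlQuery.asBuilder(): map each string
// option onto the builder, and reject anything unrecognized with the same
// message the test above asserts on.
QueryBuilder asBuilder(String query, Map<String, String> options) {
    KqlQueryBuilder builder = new KqlQueryBuilder(query);
    options.forEach((name, value) -> {
        switch (name) {
            case "case_insensitive" -> builder.caseInsensitive(Boolean.parseBoolean(value));
            case "time_zone" -> builder.timeZone(value);
            case "default_field" -> builder.defaultField(value);
            default -> throw new IllegalArgumentException("illegal kql query option [" + name + "]");
        }
    });
    return builder;
}
```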
indexResolution = IndexResolution.valid(esIndex, Map.of()); + + IndexResolution indexResolution = IndexResolution.valid(esIndex, esIndex.concreteIndices(), Map.of()); EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); @@ -266,7 +268,7 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { IndexMode.STANDARD ) ); - IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of()); + IndexResolution indexResolution = IndexResolution.valid(esIndex, esIndex.concreteIndices(), Map.of()); EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); @@ -293,7 +295,7 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "*", true)); - executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", false)); + executionInfo.swapCluster(remote2Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote2Alias, "mylogs1,mylogs2,logs*", true)); EsIndex esIndex = new EsIndex( "logs*,remote2:mylogs1,remote2:mylogs2,remote2:logs*", @@ -302,7 +304,7 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { ); // remote1 is unavailable var failure = new FieldCapabilitiesFailure(new String[] { "logs-a" }, new NoSeedNodeLeftException("unable to connect")); - IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of(remote1Alias, failure)); + IndexResolution indexResolution = IndexResolution.valid(esIndex, esIndex.concreteIndices(), Map.of(remote1Alias, failure)); EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); @@ -341,8 +343,12 @@ public void testUpdateExecutionInfoWithClustersWithNoMatchingIndices() { ); var failure = new FieldCapabilitiesFailure(new String[] { "logs-a" }, new NoSeedNodeLeftException("unable to connect")); - IndexResolution indexResolution = IndexResolution.valid(esIndex, Map.of(remote1Alias, failure)); - EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + IndexResolution indexResolution = IndexResolution.valid(esIndex, esIndex.concreteIndices(), Map.of(remote1Alias, failure)); + VerificationException ve = expectThrows( + VerificationException.class, + () -> EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution) + ); + assertThat(ve.getDetailedMessage(), containsString("Unknown index [remote2:mylogs1,mylogs2,logs*]")); } } @@ -579,4 +585,46 @@ public void testUpdateExecutionInfoToReturnEmptyResult() { assertThat(remoteFailures.get(0).reason(), containsString("unable to connect to remote cluster")); } } + + public void testMissingIndicesIsFatal() { + String localClusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + String remote1Alias = "remote1"; + String remote2Alias = "remote2"; + String remote3Alias = "remote3"; + + // scenario 1: cluster is skip_unavailable=true - not fatal + { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); + executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); + executionInfo.swapCluster(remote1Alias, (k, v) -> new 
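IndexResolution.valid now takes three arguments instead of two: the new middle argument carries the concrete indices that actually resolved, which lets the CCS code below distinguish a cluster that matched nothing from one that was unreachable. The call shape used throughout these tests, with esIndex, remote1Alias and failure as in the surrounding code:

```java
// The three ingredients of a valid resolution after this change.
IndexResolution indexResolution = IndexResolution.valid(
    esIndex,                      // merged view of all resolved mappings
    esIndex.concreteIndices(),    // which concrete indices actually matched
    Map.of(remote1Alias, failure) // per-cluster field-caps failures, if any
);
```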
EsqlExecutionInfo.Cluster(remote1Alias, "mylogs1,mylogs2,logs*", true)); + assertThat(EsqlSessionCCSUtils.missingIndicesIsFatal(remote1Alias, executionInfo), equalTo(false)); + } + + // scenario 2: cluster is local cluster and had no concrete indices - not fatal + { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); + executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "logs*", false)); + executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "mylogs1,mylogs2,logs*", true)); + assertThat(EsqlSessionCCSUtils.missingIndicesIsFatal(localClusterAlias, executionInfo), equalTo(false)); + } + + // scenario 3: cluster is local cluster and user specified a concrete index - fatal + { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); + String localIndexExpr = randomFrom("foo*,logs", "logs", "logs,metrics", "bar*,x*,logs", "logs-1,*x*"); + executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, localIndexExpr, false)); + executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, "mylogs1,mylogs2,logs*", true)); + assertThat(EsqlSessionCCSUtils.missingIndicesIsFatal(localClusterAlias, executionInfo), equalTo(true)); + } + + // scenario 4: cluster is skip_unavailable=false - always fatal + { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); + executionInfo.swapCluster(localClusterAlias, (k, v) -> new EsqlExecutionInfo.Cluster(localClusterAlias, "*", false)); + String indexExpr = randomFrom("foo*,logs", "logs", "bar*,x*,logs", "logs-1,*x*", "*"); + executionInfo.swapCluster(remote1Alias, (k, v) -> new EsqlExecutionInfo.Cluster(remote1Alias, indexExpr, false)); + assertThat(EsqlSessionCCSUtils.missingIndicesIsFatal(remote1Alias, executionInfo), equalTo(true)); + } + + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java index 5425f770c49e8..0fe89b24dfc6a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java @@ -353,6 +353,114 @@ public void testDocsStats() { | SORT languages""", Set.of("emp_no", "emp_no.*", "languages", "languages.*")); } + public void testEvalStats() { + + assertFieldNames(""" + FROM employees + | EVAL y = "a" + | STATS count = COUNT(*) BY y""", Set.of("_index")); + + assertFieldNames(""" + FROM employees + | EVAL y = "a" + | STATS count = COUNT(*) BY y + | SORT y""", Set.of("_index")); + + assertFieldNames(""" + FROM employees + | EVAL y = "a" + | STATS count = COUNT(*) BY x = y + | SORT x""", Set.of("_index")); + + assertFieldNames(""" + FROM employees + | STATS count = COUNT(*) BY first_name + | SORT first_name""", Set.of("first_name", "first_name.*")); + + assertFieldNames(""" + FROM employees + | EVAL y = "a" + | STATS count = COUNT(*) BY x = y + | SORT x, first_name""", Set.of("first_name", "first_name.*")); + + assertFieldNames(""" + FROM employees + | EVAL first_name = "a" + | STATS count = COUNT(*) BY first_name + | SORT first_name""", Set.of("_index")); + + assertFieldNames(""" + FROM employees + | EVAL y = "a" + | STATS count = COUNT(*) BY first_name = to_upper(y) + | SORT first_name""", Set.of("_index")); + + 
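The four scenarios in testMissingIndicesIsFatal above pin down a compact decision rule. A sketch of the logic they imply; only the behavior is fixed by the test, so the parameterization here is an assumption rather than the real method signature:

```java
import java.util.Arrays;

// Rule implied by the scenarios: a skip_unavailable=false remote is always
// fatal (scenario 4); a skip_unavailable=true remote never is (scenario 1);
// the local cluster is fatal only when the user named a concrete, non-wildcard
// index (scenario 3) and not when every expression is a wildcard (scenario 2).
static boolean missingIndicesIsFatalSketch(boolean isLocal, boolean skipUnavailable, String indexExpression) {
    if (isLocal == false) {
        return skipUnavailable == false;
    }
    return Arrays.stream(indexExpression.split(","))
        .anyMatch(expr -> expr.contains("*") == false);
}
```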
assertFieldNames(""" + FROM employees + | EVAL y = to_upper(first_name), z = "z" + | STATS count = COUNT(*) BY first_name = to_lower(y), z + | SORT first_name""", Set.of("first_name", "first_name.*")); + + assertFieldNames(""" + FROM employees + | EVAL y = "a" + | STATS count = COUNT(*) BY x = y, z = first_name + | SORT x, z""", Set.of("first_name", "first_name.*")); + + assertFieldNames(""" + FROM employees + | EVAL y = "a" + | STATS count = COUNT(*) BY x = y, first_name + | SORT x, first_name""", Set.of("first_name", "first_name.*")); + + assertFieldNames(""" + FROM employees + | EVAL y = "a" + | STATS count = COUNT(first_name) BY x = y + | SORT x + | DROP first_name""", Set.of("first_name", "first_name.*")); + + assertFieldNames(""" + FROM employees + | EVAL y = "a" + | STATS count = COUNT(*) BY x = y + | MV_EXPAND x""", Set.of("_index")); + + assertFieldNames(""" + FROM employees + | EVAL y = "a" + | STATS count = COUNT(*) BY first_name, y + | MV_EXPAND first_name""", Set.of("first_name", "first_name.*")); + + assertFieldNames(""" + FROM employees + | MV_EXPAND first_name + | EVAL y = "a" + | STATS count = COUNT(*) BY first_name, y + | SORT y""", Set.of("first_name", "first_name.*")); + + assertFieldNames(""" + FROM employees + | EVAL y = "a" + | MV_EXPAND y + | STATS count = COUNT(*) BY x = y + | SORT x""", Set.of("_index")); + + assertFieldNames(""" + FROM employees + | EVAL y = "a" + | STATS count = COUNT(*) BY x = y + | STATS count = COUNT(count) by x + | SORT x""", Set.of("_index")); + + assertFieldNames(""" + FROM employees + | EVAL y = "a" + | STATS count = COUNT(*) BY first_name, y + | STATS count = COUNT(count) by x = y + | SORT x""", Set.of("first_name", "first_name.*")); + } + public void testSortWithLimitOne_DropHeight() { assertFieldNames("from employees | sort languages | limit 1 | drop height*", ALL_FIELDS); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java index 116df21a33ac0..b323efad2b4c3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.index.IndexMode; import org.elasticsearch.indices.IndicesExpressionGrouper; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -102,7 +103,7 @@ public void testFailedMetric() { return null; }).when(esqlClient).execute(eq(EsqlResolveFieldsAction.TYPE), any(), any()); - var planExecutor = new PlanExecutor(indexResolver, MeterRegistry.NOOP); + var planExecutor = new PlanExecutor(indexResolver, MeterRegistry.NOOP, new XPackLicenseState(() -> 0L)); var enrichResolver = mockEnrichResolver(); var request = new EsqlQueryRequest(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/VerifierMetricsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/VerifierMetricsTests.java index 5e6588d2295f9..eda906b147956 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/VerifierMetricsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/VerifierMetricsTests.java @@ -7,6 
+7,7 @@ package org.elasticsearch.xpack.esql.stats; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.watcher.common.stats.Counters; import org.elasticsearch.xpack.esql.analysis.Verifier; @@ -205,7 +206,7 @@ public void testTwoWhereQuery() { public void testTwoQueriesExecuted() { Metrics metrics = new Metrics(new EsqlFunctionRegistry()); - Verifier verifier = new Verifier(metrics); + Verifier verifier = new Verifier(metrics, new XPackLicenseState(() -> 0L)); esqlWithVerifier(""" from employees | where languages > 2 @@ -252,7 +253,7 @@ public void testTwoQueriesExecuted() { public void testMultipleFunctions() { Metrics metrics = new Metrics(new EsqlFunctionRegistry()); - Verifier verifier = new Verifier(metrics); + Verifier verifier = new Verifier(metrics, new XPackLicenseState(() -> 0L)); esqlWithVerifier(""" from employees | where languages > 2 @@ -526,7 +527,7 @@ private Counters esql(String esql, Verifier v) { Metrics metrics = null; if (v == null) { metrics = new Metrics(new EsqlFunctionRegistry()); - verifier = new Verifier(metrics); + verifier = new Verifier(metrics, new XPackLicenseState(() -> 0L)); } analyzer(verifier).analyze(parser.createStatement(esql)); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java index 82f0ebf316508..c1d94933537f0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java @@ -43,7 +43,9 @@ import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Grok; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinConfig; import org.elasticsearch.xpack.esql.plan.logical.join.JoinType; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; import org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec.Stat; import org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec.StatsType; @@ -436,8 +438,9 @@ public void accept(Page page) { } else if (argClass == Integer.class) { return randomInt(); } else if (argClass == JoinType.class) { - return JoinType.LEFT; + return JoinTypes.LEFT; } + if (Expression.class == argClass) { /* * Rather than use any old subclass of expression lets @@ -488,6 +491,15 @@ public void accept(Page page) { if (argClass == Configuration.class) { return randomConfiguration(); } + if (argClass == JoinConfig.class) { + return new JoinConfig( + JoinTypes.LEFT, + List.of(UnresolvedAttributeTests.randomUnresolvedAttribute()), + List.of(UnresolvedAttributeTests.randomUnresolvedAttribute()), + List.of(UnresolvedAttributeTests.randomUnresolvedAttribute()) + ); + } + try { return mock(argClass); } catch (MockitoException e) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java index f533c20975aff..987ab103cf80b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java @@ -10,14 +10,13 @@ import 
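Every Verifier construction site in these tests gains an XPackLicenseState, so license-gated functionality can be rejected at analysis time. XPackLicenseState takes its clock as a LongSupplier (used for expiry checks), and () -> 0L pins it for deterministic tests:

```java
// Wiring used throughout the updated tests: metrics plus a license state with
// a fixed epoch clock, so verification results do not depend on wall time.
Metrics metrics = new Metrics(new EsqlFunctionRegistry());
Verifier verifier = new Verifier(metrics, new XPackLicenseState(() -> 0L));
```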
org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.test.AbstractWireTestCase; -import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.type.MultiTypeEsField; -import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.esql.expression.ExpressionWritables; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToBoolean; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianPoint; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianShape; @@ -92,9 +91,8 @@ protected MultiTypeEsField mutateInstance(MultiTypeEsField instance) throws IOEx @Override protected final NamedWriteableRegistry getNamedWriteableRegistry() { - List entries = new ArrayList<>(UnaryScalarFunction.getNamedWriteables()); - entries.addAll(Attribute.getNamedWriteables()); - entries.addAll(Expression.getNamedWriteables()); + List entries = new ArrayList<>(ExpressionWritables.allExpressions()); + entries.addAll(ExpressionWritables.unaryScalars()); return new NamedWriteableRegistry(entries); } diff --git a/x-pack/plugin/fleet/qa/rest/build.gradle b/x-pack/plugin/fleet/qa/rest/build.gradle index dec624bc3cc56..0959e883997d3 100644 --- a/x-pack/plugin/fleet/qa/rest/build.gradle +++ b/x-pack/plugin/fleet/qa/rest/build.gradle @@ -5,8 +5,6 @@ * 2.0. */ -import org.elasticsearch.gradle.internal.info.BuildParams - apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-test-artifact' @@ -27,7 +25,7 @@ tasks.named('yamlRestTest') { tasks.named('yamlRestCompatTest') { usesDefaultDistribution() } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("yamlRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/fleet/40_secrets_get.yml b/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/fleet/40_secrets_get.yml index e74283bc873e3..ab150e41f310a 100644 --- a/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/fleet/40_secrets_get.yml +++ b/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/fleet/40_secrets_get.yml @@ -3,6 +3,12 @@ fleet.post_secret: body: '{"value": "test secret"}' - set: { id: id } + # search node needs to be available for fleet.get_secret to work in stateless. + # The `.fleet-secrets` index is created on demand, and its search replica starts out unassigned, + # so wait_for_no_uninitialized_shards can miss it. + - do: + cluster.health: + wait_for_active_shards: all - do: fleet.get_secret: id: $id diff --git a/x-pack/plugin/identity-provider/build.gradle b/x-pack/plugin/identity-provider/build.gradle index dd085e62efa48..f9c121da0f550 100644 --- a/x-pack/plugin/identity-provider/build.gradle +++ b/x-pack/plugin/identity-provider/build.gradle @@ -1,4 +1,10 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + apply plugin: 'elasticsearch.internal-es-plugin' apply plugin: 'elasticsearch.publish' apply plugin: 'elasticsearch.internal-cluster-test' @@ -281,7 +287,7 @@ tasks.named("thirdPartyAudit").configure { addQaCheckDependencies(project) -if (BuildParams.inFipsJvm) { +if (buildParams.inFipsJvm) { // We don't support the IDP in FIPS-140 mode, so no need to run tests tasks.named("test").configure { enabled = false } } diff --git a/x-pack/plugin/identity-provider/qa/idp-rest-tests/build.gradle b/x-pack/plugin/identity-provider/qa/idp-rest-tests/build.gradle index 46e705ce27244..cbdb25825623d 100644 --- a/x-pack/plugin/identity-provider/qa/idp-rest-tests/build.gradle +++ b/x-pack/plugin/identity-provider/qa/idp-rest-tests/build.gradle @@ -1,4 +1,10 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + apply plugin: 'elasticsearch.legacy-java-rest-test' dependencies { @@ -48,6 +54,6 @@ testClusters.configureEach { // We don't support the IDP in FIPS-140 mode, so no need to run java rest tests tasks.named("javaRestTest").configure { - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) } diff --git a/x-pack/plugin/ilm/qa/multi-cluster/build.gradle b/x-pack/plugin/ilm/qa/multi-cluster/build.gradle index 111496669afe3..8bc2967fc63de 100644 --- a/x-pack/plugin/ilm/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ilm/qa/multi-cluster/build.gradle @@ -1,5 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + import org.elasticsearch.gradle.internal.test.RestIntegTestTask -import org.elasticsearch.gradle.internal.info.BuildParams import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE apply plugin: 'elasticsearch.internal-testclusters' @@ -59,5 +65,5 @@ testClusters.matching{ it.name == 'follow-cluster' }.configureEach { tasks.named("check").configure { dependsOn 'follow-cluster' } // Security is explicitly disabled for follow-cluster and leader-cluster, do not run these in FIPS mode tasks.withType(Test).configureEach { - enabled = BuildParams.inFipsJvm == false + enabled = buildParams.inFipsJvm == false } diff --git a/x-pack/plugin/ilm/qa/multi-node/build.gradle b/x-pack/plugin/ilm/qa/multi-node/build.gradle index 8712af84ac245..4cd41e58b11ac 100644 --- a/x-pack/plugin/ilm/qa/multi-node/build.gradle +++ b/x-pack/plugin/ilm/qa/multi-node/build.gradle @@ -1,4 +1,10 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE apply plugin: 'elasticsearch.legacy-java-rest-test' @@ -40,7 +46,7 @@ testClusters.configureEach { setting 'time_series.poll_interval', '10m' } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // Test clusters run with security disabled tasks.named("javaRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/ilm/src/main/java/module-info.java b/x-pack/plugin/ilm/src/main/java/module-info.java index 591c9786247e6..aa24c2d6f333c 100644 --- a/x-pack/plugin/ilm/src/main/java/module-info.java +++ b/x-pack/plugin/ilm/src/main/java/module-info.java @@ -18,6 +18,4 @@ provides org.elasticsearch.reservedstate.ReservedClusterStateHandlerProvider with org.elasticsearch.xpack.ilm.ReservedLifecycleStateHandlerProvider; - - provides org.elasticsearch.features.FeatureSpecification with org.elasticsearch.xpack.ilm.IndexLifecycleFeatures; } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java index f41524480e2df..f830a2821d841 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java @@ -146,7 +146,6 @@ public Collection createComponents(PluginServices services) { ILMHistoryTemplateRegistry ilmTemplateRegistry = new ILMHistoryTemplateRegistry( settings, services.clusterService(), - services.featureService(), services.threadPool(), services.client(), services.xContentRegistry() diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleFeatures.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleFeatures.java deleted file mode 100644 index cc78271e2d878..0000000000000 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleFeatures.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.ilm; - -import org.elasticsearch.Version; -import org.elasticsearch.features.FeatureSpecification; -import org.elasticsearch.features.NodeFeature; -import org.elasticsearch.xpack.ilm.history.ILMHistoryTemplateRegistry; - -import java.util.Map; - -public class IndexLifecycleFeatures implements FeatureSpecification { - @Override - public Map getHistoricalFeatures() { - return Map.of(ILMHistoryTemplateRegistry.MANAGED_BY_DATA_STREAM_LIFECYCLE, Version.V_8_12_0); - } -} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryTemplateRegistry.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryTemplateRegistry.java index 28c28ef6e4c55..5633033e6faa1 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryTemplateRegistry.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryTemplateRegistry.java @@ -8,12 +8,9 @@ package org.elasticsearch.xpack.ilm.history; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.ClientHelper; @@ -40,13 +37,11 @@ public class ILMHistoryTemplateRegistry extends IndexTemplateRegistry { // version 6: manage by data stream lifecycle // version 7: version the index template name so we can upgrade existing deployments public static final int INDEX_TEMPLATE_VERSION = 7; - public static final NodeFeature MANAGED_BY_DATA_STREAM_LIFECYCLE = new NodeFeature("ilm-history-managed-by-dsl"); public static final String ILM_TEMPLATE_VERSION_VARIABLE = "xpack.ilm_history.template.version"; public static final String ILM_TEMPLATE_NAME = "ilm-history-" + INDEX_TEMPLATE_VERSION; public static final String ILM_POLICY_NAME = "ilm-history-ilm-policy"; - private final FeatureService featureService; @Override protected boolean requiresMasterNode() { @@ -58,13 +53,11 @@ protected boolean requiresMasterNode() { public ILMHistoryTemplateRegistry( Settings nodeSettings, ClusterService clusterService, - FeatureService featureService, ThreadPool threadPool, Client client, NamedXContentRegistry xContentRegistry ) { super(nodeSettings, clusterService, threadPool, client, xContentRegistry); - this.featureService = featureService; this.ilmHistoryEnabled = LifecycleSettings.LIFECYCLE_HISTORY_INDEX_ENABLED_SETTING.get(nodeSettings); } @@ -104,9 +97,4 @@ protected List getLifecyclePolicies() { protected String getOrigin() { return ClientHelper.INDEX_LIFECYCLE_ORIGIN; } - - @Override - protected boolean isClusterReady(ClusterChangedEvent event) { - return featureService.clusterHasFeature(event.state(), MANAGED_BY_DATA_STREAM_LIFECYCLE); - } } diff --git a/x-pack/plugin/ilm/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/x-pack/plugin/ilm/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification deleted file mode 100644 index 1bf03ae25edd2..0000000000000 --- a/x-pack/plugin/ilm/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification +++ /dev/null @@ -1,8 +0,0 @@ -# -# Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -# or more contributor license agreements. Licensed under the Elastic License -# 2.0; you may not use this file except in compliance with the Elastic License -# 2.0. -# - -org.elasticsearch.xpack.ilm.IndexLifecycleFeatures diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java index cbdda089e8328..1797f6b10f3cb 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ClusterServiceUtils; @@ -40,7 +39,6 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xpack.ilm.IndexLifecycleFeatures; import org.hamcrest.Matchers; import org.junit.After; import org.junit.Before; @@ -80,7 +78,6 @@ public void setup() { ILMHistoryTemplateRegistry registry = new ILMHistoryTemplateRegistry( clusterService.getSettings(), clusterService, - new FeatureService(List.of(new IndexLifecycleFeatures())), threadPool, client, NamedXContentRegistry.EMPTY diff --git a/x-pack/plugin/inference/build.gradle b/x-pack/plugin/inference/build.gradle index 29d5add35ff49..3c19e11a450b4 100644 --- a/x-pack/plugin/inference/build.gradle +++ b/x-pack/plugin/inference/build.gradle @@ -4,7 +4,6 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
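The ilm-history-managed-by-dsl node feature is retired outright above: the FeatureSpecification class, its provides clause in module-info.java, and the META-INF/services registration all go, and ILMHistoryTemplateRegistry stops gating on cluster readiness. For reference, a sketch of the ServiceLoader registration pattern being deleted, with the class and feature names changed to a generic example (the types and the Map shape are visible in the removed code):

```java
import java.util.Map;

import org.elasticsearch.Version;
import org.elasticsearch.features.FeatureSpecification;
import org.elasticsearch.features.NodeFeature;

// ServiceLoader-discovered feature declaration; removing a historical feature
// means deleting this class plus the module-info "provides" clause and the
// META-INF/services/org.elasticsearch.features.FeatureSpecification entry.
public class ExampleFeatures implements FeatureSpecification {
    @Override
    public Map<NodeFeature, Version> getHistoricalFeatures() {
        return Map.of(new NodeFeature("example-feature"), Version.V_8_12_0);
    }
}
```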
*/ -import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.internal-es-plugin' apply plugin: 'elasticsearch.internal-cluster-test' diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/DefaultEndPointsIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/DefaultEndPointsIT.java index 1fef26989d845..ba3e48e11928d 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/DefaultEndPointsIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/DefaultEndPointsIT.java @@ -8,6 +8,9 @@ package org.elasticsearch.xpack.inference; import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseListener; +import org.elasticsearch.common.Strings; import org.elasticsearch.inference.TaskType; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalService; @@ -16,9 +19,12 @@ import org.junit.Before; import java.io.IOException; +import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.concurrent.CountDownLatch; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.oneOf; @@ -45,9 +51,16 @@ public void tearDown() throws Exception { super.tearDown(); } + public void testGet() throws IOException { + var elserModel = getModel(ElasticsearchInternalService.DEFAULT_ELSER_ID); + assertDefaultElserConfig(elserModel); + + var e5Model = getModel(ElasticsearchInternalService.DEFAULT_E5_ID); + assertDefaultE5Config(e5Model); + } + @SuppressWarnings("unchecked") public void testInferDeploysDefaultElser() throws IOException { - assumeTrue("Default config requires a feature flag", DefaultElserFeatureFlag.isEnabled()); var model = getModel(ElasticsearchInternalService.DEFAULT_ELSER_ID); assertDefaultElserConfig(model); @@ -74,11 +87,11 @@ private static void assertDefaultElserConfig(Map modelConfig) { adaptiveAllocations, Matchers.is(Map.of("enabled", true, "min_number_of_allocations", 0, "max_number_of_allocations", 32)) ); + assertDefaultChunkingSettings(modelConfig); } @SuppressWarnings("unchecked") public void testInferDeploysDefaultE5() throws IOException { - assumeTrue("Default config requires a feature flag", DefaultElserFeatureFlag.isEnabled()); var model = getModel(ElasticsearchInternalService.DEFAULT_E5_ID); assertDefaultE5Config(model); @@ -109,5 +122,49 @@ private static void assertDefaultE5Config(Map modelConfig) { adaptiveAllocations, Matchers.is(Map.of("enabled", true, "min_number_of_allocations", 0, "max_number_of_allocations", 32)) ); + assertDefaultChunkingSettings(modelConfig); + } + + @SuppressWarnings("unchecked") + private static void assertDefaultChunkingSettings(Map modelConfig) { + var chunkingSettings = (Map) modelConfig.get("chunking_settings"); + assertThat( + modelConfig.toString(), + chunkingSettings, + Matchers.is(Map.of("strategy", "sentence", "max_chunk_size", 250, "sentence_overlap", 1)) + ); + } + + public void testMultipleInferencesTriggeringDownloadAndDeploy() throws InterruptedException { + int numParallelRequests = 4; + var latch = new CountDownLatch(numParallelRequests); + var errors = new ArrayList(); + + var 
listener = new ResponseListener() { + @Override + public void onSuccess(Response response) { + latch.countDown(); + } + + @Override + public void onFailure(Exception exception) { + errors.add(exception); + latch.countDown(); + } + }; + + var inputs = List.of("Hello World", "Goodnight moon"); + var queryParams = Map.of("timeout", "120s"); + for (int i = 0; i < numParallelRequests; i++) { + var request = createInferenceRequest( + Strings.format("_inference/%s", ElasticsearchInternalService.DEFAULT_ELSER_ID), + inputs, + queryParams + ); + client().performRequestAsync(request, listener); + } + + latch.await(); + assertThat(errors.toString(), errors, empty()); } } diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java index 6790b9bb14c5a..4e32ef99d06dd 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java @@ -373,12 +373,17 @@ protected Map infer(String modelId, TaskType taskType, List inferInternal(String endpoint, List input, Map queryParameters) throws IOException { + protected Request createInferenceRequest(String endpoint, List input, Map queryParameters) { var request = new Request("POST", endpoint); request.setJsonEntity(jsonBody(input)); if (queryParameters.isEmpty() == false) { request.addParameters(queryParameters); } + return request; + } + + private Map inferInternal(String endpoint, List input, Map queryParameters) throws IOException { + var request = createInferenceRequest(endpoint, input, queryParameters); var response = client().performRequest(request); assertOkOrCreated(response); return entityAsMap(response); diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java index 081c83b1e7067..f5773e73f2b22 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java @@ -44,18 +44,18 @@ public void testCRUD() throws IOException { } var getAllModels = getAllModels(); - int numModels = DefaultElserFeatureFlag.isEnabled() ? 11 : 9; + int numModels = 11; assertThat(getAllModels, hasSize(numModels)); var getSparseModels = getModels("_all", TaskType.SPARSE_EMBEDDING); - int numSparseModels = DefaultElserFeatureFlag.isEnabled() ? 6 : 5; + int numSparseModels = 6; assertThat(getSparseModels, hasSize(numSparseModels)); for (var sparseModel : getSparseModels) { assertEquals("sparse_embedding", sparseModel.get("task_type")); } var getDenseModels = getModels("_all", TaskType.TEXT_EMBEDDING); - int numDenseModels = DefaultElserFeatureFlag.isEnabled() ? 
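// Reviewer note: with DefaultElserFeatureFlag removed, the flag-conditional expectations in this
// test collapse to their enabled-path constants (11 models in total, 6 sparse, 5 dense), since
// the two preconfigured endpoints (DEFAULT_ELSER_ID and DEFAULT_E5_ID) are now always present.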
5 : 4; + int numDenseModels = 5; assertThat(getDenseModels, hasSize(numDenseModels)); for (var denseModel : getDenseModels) { assertEquals("text_embedding", denseModel.get("task_type")); @@ -134,7 +134,8 @@ public void testApisWithoutTaskType() throws IOException { @SuppressWarnings("unchecked") public void testGetServicesWithoutTaskType() throws IOException { List services = getAllServices(); - if (ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { + if ((ElasticInferenceServiceFeature.DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled() + || ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled())) { assertThat(services.size(), equalTo(18)); } else { assertThat(services.size(), equalTo(17)); @@ -169,7 +170,8 @@ public void testGetServicesWithoutTaskType() throws IOException { "watsonxai" ) ); - if (ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { + if ((ElasticInferenceServiceFeature.DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled() + || ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled())) { providerList.add(6, "elastic"); } assertArrayEquals(providers, providerList.toArray()); @@ -257,7 +259,8 @@ public void testGetServicesWithCompletionTaskType() throws IOException { public void testGetServicesWithSparseEmbeddingTaskType() throws IOException { List services = getServices(TaskType.SPARSE_EMBEDDING); - if (ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { + if ((ElasticInferenceServiceFeature.DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled() + || ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled())) { assertThat(services.size(), equalTo(5)); } else { assertThat(services.size(), equalTo(4)); @@ -272,7 +275,8 @@ public void testGetServicesWithSparseEmbeddingTaskType() throws IOException { Arrays.sort(providers); var providerList = new ArrayList<>(Arrays.asList("alibabacloud-ai-search", "elasticsearch", "hugging_face", "test_service")); - if (ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { + if ((ElasticInferenceServiceFeature.DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled() + || ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled())) { providerList.add(1, "elastic"); } assertArrayEquals(providers, providerList.toArray()); @@ -432,7 +436,7 @@ public void testUnsupportedStream() throws Exception { assertEquals(TaskType.SPARSE_EMBEDDING.toString(), singleModel.get("task_type")); try { - var events = streamInferOnMockService(modelId, TaskType.SPARSE_EMBEDDING, List.of(randomAlphaOfLength(10))); + var events = streamInferOnMockService(modelId, TaskType.SPARSE_EMBEDDING, List.of(randomUUID())); assertThat(events.size(), equalTo(2)); events.forEach(event -> { switch (event.name()) { @@ -457,7 +461,7 @@ public void testSupportedStream() throws Exception { assertEquals(modelId, singleModel.get("inference_id")); assertEquals(TaskType.COMPLETION.toString(), singleModel.get("task_type")); - var input = IntStream.range(1, 2 + randomInt(8)).mapToObj(i -> randomAlphaOfLength(10)).toList(); + var input = IntStream.range(1, 2 + randomInt(8)).mapToObj(i -> randomUUID()).toList(); try { var events = streamInferOnMockService(modelId, TaskType.COMPLETION, input); diff --git a/x-pack/plugin/inference/qa/mixed-cluster/build.gradle b/x-pack/plugin/inference/qa/mixed-cluster/build.gradle index 
64edb196397a0..0f8c732154e85 100644 --- a/x-pack/plugin/inference/qa/mixed-cluster/build.gradle +++ b/x-pack/plugin/inference/qa/mixed-cluster/build.gradle @@ -1,6 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-java-rest-test' @@ -20,7 +26,7 @@ def supportedVersion = bwcVersion -> { return bwcVersion.onOrAfter(Version.fromString("8.11.0")) && bwcVersion != VersionProperties.elasticsearchVersion } -BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> def javaRestTest = tasks.register("v${bwcVersion}#javaRestTest", StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/x-pack/plugin/inference/qa/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/inference/qa/mixed/CohereServiceMixedIT.java b/x-pack/plugin/inference/qa/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/inference/qa/mixed/CohereServiceMixedIT.java index 8cb37ad645358..c16271ed44083 100644 --- a/x-pack/plugin/inference/qa/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/inference/qa/mixed/CohereServiceMixedIT.java +++ b/x-pack/plugin/inference/qa/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/inference/qa/mixed/CohereServiceMixedIT.java @@ -135,6 +135,7 @@ public void testRerank() throws IOException { final String inferenceId = "mixed-cluster-rerank"; + cohereRerankServer.enqueue(new MockResponse().setResponseCode(200).setBody(rerankResponse())); put(inferenceId, rerankConfig(getUrl(cohereRerankServer)), TaskType.RERANK); assertRerank(inferenceId); diff --git a/x-pack/plugin/inference/qa/rolling-upgrade/build.gradle b/x-pack/plugin/inference/qa/rolling-upgrade/build.gradle index 5d72fc96d98d8..214d775b46236 100644 --- a/x-pack/plugin/inference/qa/rolling-upgrade/build.gradle +++ b/x-pack/plugin/inference/qa/rolling-upgrade/build.gradle @@ -5,7 +5,6 @@ * 2.0. 
*/ -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-java-rest-test' @@ -20,7 +19,7 @@ dependencies { } // Inference API added in 8.11 -BuildParams.bwcVersions.withWireCompatible(v -> v.after("8.11.0")) { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible(v -> v.after("8.11.0")) { bwcVersion, baseName -> tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/CohereServiceUpgradeIT.java b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/CohereServiceUpgradeIT.java index 32969ffd1d112..0acbc148515bd 100644 --- a/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/CohereServiceUpgradeIT.java +++ b/x-pack/plugin/inference/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/xpack/application/CohereServiceUpgradeIT.java @@ -201,6 +201,7 @@ public void testRerank() throws IOException { var testTaskType = TaskType.RERANK; if (isOldCluster()) { + cohereRerankServer.enqueue(new MockResponse().setResponseCode(200).setBody(rerankResponse())); put(oldClusterId, rerankConfig(getUrl(cohereRerankServer)), testTaskType); var configs = (List>) get(testTaskType, oldClusterId).get(old_cluster_endpoint_identifier); assertThat(configs, hasSize(1)); @@ -229,6 +230,7 @@ public void testRerank() throws IOException { assertRerank(oldClusterId); // New endpoint + cohereRerankServer.enqueue(new MockResponse().setResponseCode(200).setBody(rerankResponse())); put(upgradedClusterId, rerankConfig(getUrl(cohereRerankServer)), testTaskType); configs = (List>) get(upgradedClusterId).get("endpoints"); assertThat(configs, hasSize(1)); diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java index 2ddc4f6c3e2f6..ae11a02d312e2 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestDenseInferenceServiceExtension.java @@ -18,7 +18,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceExtension; @@ -140,7 +139,6 @@ public void chunkedInfer( List input, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestRerankingServiceExtension.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestRerankingServiceExtension.java index 
2075c1b1924bf..9320571572f0a 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestRerankingServiceExtension.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestRerankingServiceExtension.java @@ -17,7 +17,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceExtension; @@ -128,7 +127,6 @@ public void chunkedInfer( List input, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestSparseInferenceServiceExtension.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestSparseInferenceServiceExtension.java index 3d6f0ce6eba05..fe0223cce0323 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestSparseInferenceServiceExtension.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestSparseInferenceServiceExtension.java @@ -17,7 +17,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceExtension; @@ -131,7 +130,6 @@ public void chunkedInfer( List input, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestStreamingCompletionServiceExtension.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestStreamingCompletionServiceExtension.java index 595b92a6be66b..6d7983bc8cb53 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestStreamingCompletionServiceExtension.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/TestStreamingCompletionServiceExtension.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceExtension; @@ -160,7 +159,6 @@ public void chunkedInfer( List input, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { diff --git a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java 
b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java index 54d83af8f5d95..3b0fc869c8124 100644 --- a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java +++ b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.delete.DeleteRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -30,8 +31,10 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Locale; import java.util.Map; +import java.util.Set; import static org.elasticsearch.xpack.inference.mapper.SemanticTextFieldTests.randomSemanticTextInput; import static org.hamcrest.Matchers.equalTo; @@ -87,30 +90,38 @@ public void testBulkOperations() throws Exception { int totalBulkReqs = randomIntBetween(2, 100); long totalDocs = 0; + Set ids = new HashSet<>(); for (int bulkReqs = 0; bulkReqs < totalBulkReqs; bulkReqs++) { BulkRequestBuilder bulkReqBuilder = client().prepareBulk(); int totalBulkSize = randomIntBetween(1, 100); for (int bulkSize = 0; bulkSize < totalBulkSize; bulkSize++) { - String id = Long.toString(totalDocs); + if (ids.size() > 0 && rarely(random())) { + String id = randomFrom(ids); + ids.remove(id); + DeleteRequestBuilder request = new DeleteRequestBuilder(client(), INDEX_NAME).setId(id); + bulkReqBuilder.add(request); + continue; + } + String id = Long.toString(totalDocs++); boolean isIndexRequest = randomBoolean(); Map source = new HashMap<>(); source.put("sparse_field", isIndexRequest && rarely() ? null : randomSemanticTextInput()); source.put("dense_field", isIndexRequest && rarely() ? 
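// Reviewer note: replacing the monotonically increasing totalDocs counter with an explicit set of
// live ids is what makes the newly added randomized deletes safe; a delete or in-place update can
// only target an id that is still indexed, and the final hit-count assertion below becomes
// ids.size() rather than totalDocs. The invariant being maintained, condensed:
//
//   index(id)  -> ids.add(id)
//   delete(id) -> ids.remove(id)      // id drawn via randomFrom(ids)
//   update/upsert(id) -> ids.add(id)  // no-op if already present
//   assert totalHits == ids.size()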
null : randomSemanticTextInput()); if (isIndexRequest) { bulkReqBuilder.add(new IndexRequestBuilder(client()).setIndex(INDEX_NAME).setId(id).setSource(source)); - totalDocs++; + ids.add(id); } else { boolean isUpsert = randomBoolean(); UpdateRequestBuilder request = new UpdateRequestBuilder(client()).setIndex(INDEX_NAME).setDoc(source); - if (isUpsert || totalDocs == 0) { + if (isUpsert || ids.size() == 0) { request.setDocAsUpsert(true); - totalDocs++; } else { // Update already existing document - id = Long.toString(randomLongBetween(0, totalDocs - 1)); + id = randomFrom(ids); } request.setId(id); bulkReqBuilder.add(request); + ids.add(id); } } BulkResponse bulkResponse = bulkReqBuilder.get(); @@ -135,7 +146,7 @@ public void testBulkOperations() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().size(0).trackTotalHits(true); SearchResponse searchResponse = client().search(new SearchRequest(INDEX_NAME).source(sourceBuilder)).get(); try { - assertThat(searchResponse.getHits().getTotalHits().value(), equalTo(totalDocs)); + assertThat(searchResponse.getHits().getTotalHits().value(), equalTo((long) ids.size())); } finally { searchResponse.decRef(); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/DefaultElserFeatureFlag.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/DefaultElserFeatureFlag.java deleted file mode 100644 index 2a764dabd62ae..0000000000000 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/DefaultElserFeatureFlag.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.inference; - -import org.elasticsearch.common.util.FeatureFlag; - -public class DefaultElserFeatureFlag { - - private DefaultElserFeatureFlag() {} - - private static final FeatureFlag FEATURE_FLAG = new FeatureFlag("inference_default_elser"); - - public static boolean isEnabled() { - return FEATURE_FLAG.isEnabled(); - } -} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java index 10ffedef14e26..c82f287792a7c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java @@ -14,7 +14,6 @@ import org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder; import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder; -import java.util.HashSet; import java.util.Set; /** @@ -24,23 +23,24 @@ public class InferenceFeatures implements FeatureSpecification { @Override public Set getFeatures() { - var features = new HashSet(); - features.add(TextSimilarityRankRetrieverBuilder.TEXT_SIMILARITY_RERANKER_RETRIEVER_SUPPORTED); - features.add(RandomRankRetrieverBuilder.RANDOM_RERANKER_RETRIEVER_SUPPORTED); - features.add(SemanticTextFieldMapper.SEMANTIC_TEXT_SEARCH_INFERENCE_ID); - features.add(SemanticQueryBuilder.SEMANTIC_TEXT_INNER_HITS); - features.add(TextSimilarityRankRetrieverBuilder.TEXT_SIMILARITY_RERANKER_COMPOSITION_SUPPORTED); - if (DefaultElserFeatureFlag.isEnabled()) { - features.add(SemanticTextFieldMapper.SEMANTIC_TEXT_DEFAULT_ELSER_2); - } - return Set.copyOf(features); + return Set.of( + TextSimilarityRankRetrieverBuilder.TEXT_SIMILARITY_RERANKER_RETRIEVER_SUPPORTED, + RandomRankRetrieverBuilder.RANDOM_RERANKER_RETRIEVER_SUPPORTED, + SemanticTextFieldMapper.SEMANTIC_TEXT_SEARCH_INFERENCE_ID, + SemanticQueryBuilder.SEMANTIC_TEXT_INNER_HITS, + SemanticTextFieldMapper.SEMANTIC_TEXT_DEFAULT_ELSER_2, + TextSimilarityRankRetrieverBuilder.TEXT_SIMILARITY_RERANKER_COMPOSITION_SUPPORTED + ); } @Override public Set getTestFeatures() { return Set.of( SemanticTextFieldMapper.SEMANTIC_TEXT_IN_OBJECT_FIELD_FIX, - SemanticTextFieldMapper.SEMANTIC_TEXT_SINGLE_FIELD_UPDATE_FIX + SemanticTextFieldMapper.SEMANTIC_TEXT_SINGLE_FIELD_UPDATE_FIX, + SemanticTextFieldMapper.SEMANTIC_TEXT_DELETE_FIX, + SemanticTextFieldMapper.SEMANTIC_TEXT_ZERO_SIZE_FIX, + SemanticTextFieldMapper.SEMANTIC_TEXT_ALWAYS_EMIT_INFERENCE_ID_FIX ); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java index 02bddb6076d69..2320cca8295d1 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceNamedWriteablesProvider.java @@ -63,6 +63,7 @@ import org.elasticsearch.xpack.inference.services.elasticsearch.CustomElandInternalServiceSettings; import org.elasticsearch.xpack.inference.services.elasticsearch.CustomElandInternalTextEmbeddingServiceSettings; import org.elasticsearch.xpack.inference.services.elasticsearch.CustomElandRerankTaskSettings; +import 
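// Reviewer note: the InferenceFeatures change above is the standard cleanup that follows a
// feature-flag removal. Once SEMANTIC_TEXT_DEFAULT_ELSER_2 is granted unconditionally, the
// mutable HashSet plus Set.copyOf dance reduces to an immutable literal, for example:
//
//   public Set<NodeFeature> getFeatures() {
//       return Set.of(FEATURE_A, FEATURE_B);   // no flag branches left
//   }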
org.elasticsearch.xpack.inference.services.elasticsearch.ElasticRerankerServiceSettings; import org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalServiceSettings; import org.elasticsearch.xpack.inference.services.elasticsearch.ElserInternalServiceSettings; import org.elasticsearch.xpack.inference.services.elasticsearch.ElserMlNodeTaskSettings; @@ -415,7 +416,13 @@ private static void addInternalNamedWriteables(List namedWriteables) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index f068caff805af..48458bf4f5086 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.inference; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -91,7 +93,6 @@ import org.elasticsearch.xpack.inference.services.cohere.CohereService; import org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceService; import org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceServiceComponents; -import org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceServiceFeature; import org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceServiceSettings; import org.elasticsearch.xpack.inference.services.elasticsearch.ElasticsearchInternalService; import org.elasticsearch.xpack.inference.services.googleaistudio.GoogleAiStudioService; @@ -113,6 +114,9 @@ import java.util.stream.Stream; import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceService.ELASTIC_INFERENCE_SERVICE_IDENTIFIER; +import static org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceServiceFeature.DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG; +import static org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG; public class InferencePlugin extends Plugin implements ActionPlugin, ExtensiblePlugin, SystemIndexPlugin, MapperPlugin, SearchPlugin { @@ -135,11 +139,13 @@ public class InferencePlugin extends Plugin implements ActionPlugin, ExtensibleP public static final String NAME = "inference"; public static final String UTILITY_THREAD_POOL_NAME = "inference_utility"; + private static final Logger log = LogManager.getLogger(InferencePlugin.class); + private final Settings settings; private final SetOnce httpFactory = new SetOnce<>(); private final SetOnce amazonBedrockFactory = new SetOnce<>(); private final SetOnce serviceComponents = new SetOnce<>(); - private final SetOnce eisComponents = new SetOnce<>(); + private final SetOnce elasticInferenceServiceComponents = new SetOnce<>(); private final SetOnce inferenceServiceRegistry = new SetOnce<>(); private final SetOnce shardBulkInferenceActionFilter = new SetOnce<>(); private List inferenceServiceExtensions; @@ -207,12 +213,35 @@ public Collection createComponents(PluginServices services) { var inferenceServices = new ArrayList<>(inferenceServiceExtensions); inferenceServices.add(this::getInferenceServiceFactories); - if 
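// Reviewer note: the replacement block below implements a two-flag transition. The new flag wins,
// the deprecated flag still works but logs a warning, and a null URL means the Elastic Inference
// Service is not registered at all. The precedence, reduced to a sketch with hypothetical names:
//
//   String url = null;
//   if (NEW_FLAG.isEnabled()) {
//       url = settings.getElasticInferenceServiceUrl();
//   } else if (DEPRECATED_FLAG.isEnabled()) {
//       log.warn("deprecated flag in use, please switch to the new one");
//       url = settings.getEisGatewayUrl();
//   }
//   if (url != null) {
//       registerElasticInferenceService(url);   // hypothetical helper
//   }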
(ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { - ElasticInferenceServiceSettings eisSettings = new ElasticInferenceServiceSettings(settings); - eisComponents.set(new ElasticInferenceServiceComponents(eisSettings.getEisGatewayUrl())); + // Set elasticInferenceUrl based on feature flags to support transitioning to the new Elastic Inference Service URL without exposing + // internal names like "eis" or "gateway". + ElasticInferenceServiceSettings inferenceServiceSettings = new ElasticInferenceServiceSettings(settings); + + String elasticInferenceUrl = null; + + if (ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { + elasticInferenceUrl = inferenceServiceSettings.getElasticInferenceServiceUrl(); + } else if (DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) { + log.warn( + "Deprecated flag {} detected for enabling {}. Please use {}.", + ELASTIC_INFERENCE_SERVICE_IDENTIFIER, + DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG, + ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG + ); + elasticInferenceUrl = inferenceServiceSettings.getEisGatewayUrl(); + } + + if (elasticInferenceUrl != null) { + elasticInferenceServiceComponents.set(new ElasticInferenceServiceComponents(elasticInferenceUrl)); inferenceServices.add( - () -> List.of(context -> new ElasticInferenceService(httpFactory.get(), serviceComponents.get(), eisComponents.get())) + () -> List.of( + context -> new ElasticInferenceService( + httpFactory.get(), + serviceComponents.get(), + elasticInferenceServiceComponents.get() + ) + ) ); } @@ -227,10 +256,8 @@ public Collection createComponents(PluginServices services) { // reference correctly var registry = new InferenceServiceRegistry(inferenceServices, factoryContext); registry.init(services.client()); - if (DefaultElserFeatureFlag.isEnabled()) { - for (var service : registry.getServices().values()) { - service.defaultConfigIds().forEach(modelRegistry::addDefaultIds); - } + for (var service : registry.getServices().values()) { + service.defaultConfigIds().forEach(modelRegistry::addDefaultIds); } inferenceServiceRegistry.set(registry); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java index b3bbe3a7df9bc..d178e927aa65d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java @@ -30,7 +30,6 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.InferenceService; import org.elasticsearch.inference.InferenceServiceRegistry; import org.elasticsearch.inference.InputType; @@ -337,16 +336,7 @@ private void onFinish() { } }; inferenceProvider.service() - .chunkedInfer( - inferenceProvider.model(), - null, - inputs, - Map.of(), - InputType.INGEST, - new ChunkingOptions(null, null), - TimeValue.MAX_VALUE, - completionListener - ); + .chunkedInfer(inferenceProvider.model(), null, inputs, Map.of(), InputType.INGEST, TimeValue.MAX_VALUE, completionListener); } private FieldInferenceResponseAccumulator ensureResponseAccumulatorSlot(int id) { @@ -413,8 +403,8 @@ private 
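// Reviewer note: the chunkedInfer(...) call above is representative of the cross-cutting signature
// change in this patch. The ChunkingOptions argument is dropped from every implementation
// (SenderService, the test-service mocks, and each concrete service), which suggests chunking is
// now driven by the chunking settings persisted on the model rather than by a per-request option.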
void applyInferenceResponses(BulkItemRequest item, FieldInferenceRespons */ private Map> createFieldInferenceRequests(BulkShardRequest bulkShardRequest) { Map> fieldRequestsMap = new LinkedHashMap<>(); - int itemIndex = 0; - for (var item : bulkShardRequest.items()) { + for (int itemIndex = 0; itemIndex < bulkShardRequest.items().length; itemIndex++) { + var item = bulkShardRequest.items()[itemIndex]; if (item.getPrimaryResponse() != null) { // item was already aborted/processed by a filter in the chain upstream (e.g. security) continue; @@ -441,6 +431,7 @@ private Map> createFieldInferenceRequests(Bu // ignore delete request continue; } + final Map docMap = indexRequest.sourceAsMap(); for (var entry : fieldInferenceMap.values()) { String field = entry.getName(); @@ -483,7 +474,6 @@ private Map> createFieldInferenceRequests(Bu } } } - itemIndex++; } return fieldRequestsMap; } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/RateLimiter.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/RateLimiter.java index bbc5082d45004..b74e473155aec 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/RateLimiter.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/RateLimiter.java @@ -28,6 +28,14 @@ * * By setting the accumulated tokens limit to a value greater than zero, it effectively allows bursts of traffic. If the accumulated * tokens limit is set to zero, it will force the acquiring thread to wait on each call. + * + * Example: + * Time unit: Second + * Tokens to produce per time unit: 10 + * Limit for tokens in bucket: 100 + * + * Tokens in bucket after n seconds (n second -> tokens in bucket): + * 1 sec -> 10 tokens, 2 sec -> 20 tokens, ... , 10 sec -> 100 tokens (bucket full), ... 200 sec -> 100 tokens (no increase in tokens) */ public class RateLimiter { @@ -76,6 +84,7 @@ public final synchronized void setRate(double newAccumulatedTokensLimit, double throw new IllegalArgumentException(Strings.format("Tokens per time unit must be less than or equal to %s", Double.MAX_VALUE)); } + // If the new token limit is smaller than what we've accumulated already we need to drop tokens to meet the new token limit accumulatedTokens = Math.min(accumulatedTokens, newAccumulatedTokensLimit); accumulatedTokensLimit = newAccumulatedTokensLimit; @@ -88,7 +97,8 @@ } /** - * Causes the thread to wait until the tokens are available + * Causes the thread to wait until the tokens are available. + * This reserves tokens in advance leading to a reduction of accumulated tokens. * @param tokens the number of items of work that should be throttled, typically you'd pass a value of 1 here * @throws InterruptedException _ */ @@ -130,6 +140,7 @@ private static void validateTokenRequest(int tokens) { /** * Returns the amount of time to wait for the tokens to become available. + * This reserves tokens in advance leading to a reduction of accumulated tokens. * @param tokens the number of items of work that should be throttled, typically you'd pass a value of 1 here. Must be greater than 0. 
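* (Reviewer note, a worked example under the semantics documented above: with 10 tokens produced
* per second and an accumulated-token limit of 100, a caller requesting 25 tokens after 2 idle
* seconds finds 20 accumulated, reserves all 20 plus 5 not-yet-produced tokens, and is told to
* wait 5 / 10 = 0.5 seconds. This is the "reserves tokens in advance" behavior noted above.)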
* @return the amount of time to wait */ diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/elastic/ElasticInferenceServiceActionCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/elastic/ElasticInferenceServiceActionCreator.java index c8ada6e535b63..fa096901ed67a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/elastic/ElasticInferenceServiceActionCreator.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/elastic/ElasticInferenceServiceActionCreator.java @@ -15,9 +15,11 @@ import org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceServiceSparseEmbeddingsModel; import org.elasticsearch.xpack.inference.telemetry.TraceContext; +import java.util.Locale; import java.util.Objects; import static org.elasticsearch.xpack.inference.external.action.ActionUtils.constructFailedToSendRequestMessage; +import static org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceService.ELASTIC_INFERENCE_SERVICE_IDENTIFIER; public class ElasticInferenceServiceActionCreator implements ElasticInferenceServiceActionVisitor { @@ -36,7 +38,10 @@ public ElasticInferenceServiceActionCreator(Sender sender, ServiceComponents ser @Override public ExecutableAction create(ElasticInferenceServiceSparseEmbeddingsModel model) { var requestManager = new ElasticInferenceServiceSparseEmbeddingsRequestManager(model, serviceComponents, traceContext); - var errorMessage = constructFailedToSendRequestMessage(model.uri(), "Elastic Inference Service sparse embeddings"); + var errorMessage = constructFailedToSendRequestMessage( + model.uri(), + String.format(Locale.ROOT, "%s sparse embeddings", ELASTIC_INFERENCE_SERVICE_IDENTIFIER) + ); return new SenderExecutableAction(sender, requestManager, errorMessage); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ElasticInferenceServiceSparseEmbeddingsRequestManager.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ElasticInferenceServiceSparseEmbeddingsRequestManager.java index e7ee41525f07d..bf3409888aaf8 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ElasticInferenceServiceSparseEmbeddingsRequestManager.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/ElasticInferenceServiceSparseEmbeddingsRequestManager.java @@ -22,9 +22,11 @@ import org.elasticsearch.xpack.inference.telemetry.TraceContext; import java.util.List; +import java.util.Locale; import java.util.function.Supplier; import static org.elasticsearch.xpack.inference.common.Truncator.truncate; +import static org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceService.ELASTIC_INFERENCE_SERVICE_IDENTIFIER; public class ElasticInferenceServiceSparseEmbeddingsRequestManager extends ElasticInferenceServiceRequestManager { @@ -40,7 +42,7 @@ public class ElasticInferenceServiceSparseEmbeddingsRequestManager extends Elast private static ResponseHandler createSparseEmbeddingsHandler() { return new ElasticInferenceServiceResponseHandler( - "Elastic Inference Service sparse embeddings", + String.format(Locale.ROOT, "%s sparse embeddings", ELASTIC_INFERENCE_SERVICE_IDENTIFIER), ElasticInferenceServiceSparseEmbeddingsResponseEntity::fromResponse ); } diff --git 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/elastic/ElasticInferenceServiceSparseEmbeddingsResponseEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/elastic/ElasticInferenceServiceSparseEmbeddingsResponseEntity.java index 2b36cc5d22cd4..42ca45f75a9c0 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/elastic/ElasticInferenceServiceSparseEmbeddingsResponseEntity.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/elastic/ElasticInferenceServiceSparseEmbeddingsResponseEntity.java @@ -33,7 +33,7 @@ public class ElasticInferenceServiceSparseEmbeddingsResponseEntity { "Failed to find required field [%s] in Elastic Inference Service embeddings response"; /** - * Parses the EIS json response. + * Parses the Elastic Inference Service json response. * * For a request like: * diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiRerankResponseEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiRerankResponseEntity.java index 24946ee5875a5..78673277797d2 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiRerankResponseEntity.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiRerankResponseEntity.java @@ -30,6 +30,8 @@ public class GoogleVertexAiRerankResponseEntity { private static final String FAILED_TO_FIND_FIELD_TEMPLATE = "Failed to find required field [%s] in Google Vertex AI rerank response"; + private static final String INVALID_ID_FIELD_FORMAT_TEMPLATE = "Expected numeric value for record ID field in Google Vertex AI rerank " + + "response but received [%s]"; /** * Parses the Google Vertex AI rerank response. 
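Reviewer note on the hunk below: the rerank entity previously derived the document index from parse
order; it now requires the numeric "id" field returned by Google Vertex AI and fails fast when the
field is missing or non-numeric. The guard, sketched in isolation (the field arrives as a JSON
string, hence the parse):

    int index;
    try {
        index = Integer.parseInt(parsedRankedDoc.id);
    } catch (NumberFormatException e) {
        throw new IllegalStateException(format(INVALID_ID_FIELD_FORMAT_TEMPLATE, parsedRankedDoc.id));
    }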
@@ -109,14 +111,27 @@ private static List doParse(XContentParser parser) throw new IllegalStateException(format(FAILED_TO_FIND_FIELD_TEMPLATE, RankedDoc.SCORE.getPreferredName())); } - return new RankedDocsResults.RankedDoc(index, parsedRankedDoc.score, parsedRankedDoc.content); + if (parsedRankedDoc.id == null) { + throw new IllegalStateException(format(FAILED_TO_FIND_FIELD_TEMPLATE, RankedDoc.ID.getPreferredName())); + } + + try { + return new RankedDocsResults.RankedDoc( + Integer.parseInt(parsedRankedDoc.id), + parsedRankedDoc.score, + parsedRankedDoc.content + ); + } catch (NumberFormatException e) { + throw new IllegalStateException(format(INVALID_ID_FIELD_FORMAT_TEMPLATE, parsedRankedDoc.id)); + } }); } - private record RankedDoc(@Nullable Float score, @Nullable String content) { + private record RankedDoc(@Nullable Float score, @Nullable String content, @Nullable String id) { private static final ParseField CONTENT = new ParseField("content"); private static final ParseField SCORE = new ParseField("score"); + private static final ParseField ID = new ParseField("id"); private static final ObjectParser PARSER = new ObjectParser<>( "google_vertex_ai_rerank_response", true, @@ -126,6 +141,7 @@ private record RankedDoc(@Nullable Float score, @Nullable String content) { static { PARSER.declareString(Builder::setContent, CONTENT); PARSER.declareFloat(Builder::setScore, SCORE); + PARSER.declareString(Builder::setId, ID); } public static RankedDoc parse(XContentParser parser) { @@ -137,6 +153,7 @@ private static final class Builder { private String content; private Float score; + private String id; private Builder() {} @@ -150,8 +167,13 @@ public Builder setContent(String content) { return this; } + public Builder setId(String id) { + this.id = id; + return this; + } + public RankedDoc build() { - return new RankedDoc(score, content); + return new RankedDoc(score, content, id); } } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java index 8aa53430110b2..672cf6a025495 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java @@ -57,7 +57,6 @@ import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xpack.core.ml.inference.results.MlTextEmbeddingResults; import org.elasticsearch.xpack.core.ml.inference.results.TextExpansionResults; -import org.elasticsearch.xpack.inference.DefaultElserFeatureFlag; import java.io.IOException; import java.util.ArrayList; @@ -70,6 +69,7 @@ import java.util.Set; import java.util.function.Function; +import static org.elasticsearch.search.SearchService.DEFAULT_SIZE; import static org.elasticsearch.xpack.inference.mapper.SemanticTextField.CHUNKED_EMBEDDINGS_FIELD; import static org.elasticsearch.xpack.inference.mapper.SemanticTextField.CHUNKED_TEXT_FIELD; import static org.elasticsearch.xpack.inference.mapper.SemanticTextField.CHUNKS_FIELD; @@ -90,8 +90,12 @@ public class SemanticTextFieldMapper extends FieldMapper implements InferenceFie public static final NodeFeature SEMANTIC_TEXT_SEARCH_INFERENCE_ID = new NodeFeature("semantic_text.search_inference_id"); public static final NodeFeature SEMANTIC_TEXT_DEFAULT_ELSER_2 = new NodeFeature("semantic_text.default_elser_2"); public static 
final NodeFeature SEMANTIC_TEXT_IN_OBJECT_FIELD_FIX = new NodeFeature("semantic_text.in_object_field_fix"); - public static final NodeFeature SEMANTIC_TEXT_SINGLE_FIELD_UPDATE_FIX = new NodeFeature("semantic_text.single_field_update_fix"); + public static final NodeFeature SEMANTIC_TEXT_DELETE_FIX = new NodeFeature("semantic_text.delete_fix"); + public static final NodeFeature SEMANTIC_TEXT_ZERO_SIZE_FIX = new NodeFeature("semantic_text.zero_size_fix"); + public static final NodeFeature SEMANTIC_TEXT_ALWAYS_EMIT_INFERENCE_ID_FIX = new NodeFeature( + "semantic_text.always_emit_inference_id_fix" + ); public static final String CONTENT_TYPE = "semantic_text"; public static final String DEFAULT_ELSER_2_INFERENCE_ID = DEFAULT_ELSER_ID; @@ -111,18 +115,14 @@ public static class Builder extends FieldMapper.Builder { INFERENCE_ID_FIELD, false, mapper -> ((SemanticTextFieldType) mapper.fieldType()).inferenceId, - DefaultElserFeatureFlag.isEnabled() ? DEFAULT_ELSER_2_INFERENCE_ID : null + DEFAULT_ELSER_2_INFERENCE_ID ).addValidator(v -> { if (Strings.isEmpty(v)) { - // If the default ELSER feature flag is enabled, the only way we get here is if the user explicitly sets the param to an - // empty value. However, if the feature flag is disabled, we can get here if the user didn't set the param. - // Adjust the error message appropriately. - String message = DefaultElserFeatureFlag.isEnabled() - ? "[" + INFERENCE_ID_FIELD + "] on mapper [" + leafName() + "] of type [" + CONTENT_TYPE + "] must not be empty" - : "[" + INFERENCE_ID_FIELD + "] on mapper [" + leafName() + "] of type [" + CONTENT_TYPE + "] must be specified"; - throw new IllegalArgumentException(message); + throw new IllegalArgumentException( + "[" + INFERENCE_ID_FIELD + "] on mapper [" + leafName() + "] of type [" + CONTENT_TYPE + "] must not be empty" + ); } - }); + }).alwaysSerialize(); private final Parameter searchInferenceId = Parameter.stringParam( SEARCH_INFERENCE_ID_FIELD, @@ -512,7 +512,7 @@ public boolean fieldHasValue(FieldInfos fieldInfos) { return fieldInfos.fieldInfo(getEmbeddingsFieldName(name())) != null; } - public QueryBuilder semanticQuery(InferenceResults inferenceResults, float boost, String queryName) { + public QueryBuilder semanticQuery(InferenceResults inferenceResults, Integer requestSize, float boost, String queryName) { String nestedFieldPath = getChunksFieldName(name()); String inferenceResultsFieldName = getEmbeddingsFieldName(name()); QueryBuilder childQueryBuilder; @@ -556,7 +556,13 @@ public QueryBuilder semanticQuery(InferenceResults inferenceResults, float boost ); } - yield new KnnVectorQueryBuilder(inferenceResultsFieldName, inference, null, null, null, null); + Integer k = requestSize; + if (k != null) { + // Ensure that k is at least the default size so that aggregations work when size is set to 0 in the request + k = Math.max(k, DEFAULT_SIZE); + } + + yield new KnnVectorQueryBuilder(inferenceResultsFieldName, inference, k, null, null, null); } default -> throw new IllegalStateException( "Field [" diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java index 478f2e6a21868..d648db2fbfdbc 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/queries/SemanticQueryBuilder.java @@ -170,7 +170,7 @@ 
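Reviewer note on the semantic_text kNN change above: passing the request's size straight through as
k would break aggregation-only searches, since size: 0 yields k = 0 and no vector hits. Flooring k
at DEFAULT_SIZE keeps those queries working:

    Integer k = requestSize;
    if (k != null) {
        k = Math.max(k, DEFAULT_SIZE);   // keep knn returning hits when the request size is 0
    }

A null request size preserves the previous behavior of letting the knn query choose its own k.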
private QueryBuilder doRewriteBuildSemanticQuery(SearchExecutionContext searchEx ); } - return semanticTextFieldType.semanticQuery(inferenceResults, boost(), queryName()); + return semanticTextFieldType.semanticQuery(inferenceResults, searchExecutionContext.requestSize(), boost(), queryName()); } else { throw new IllegalArgumentException( "Field [" + fieldName + "] of type [" + fieldType.typeName() + "] does not support " + NAME + " queries" diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestGetInferenceModelAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestGetInferenceModelAction.java index 967ad4b46dcb3..83b2a8a0f5182 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestGetInferenceModelAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestGetInferenceModelAction.java @@ -15,10 +15,7 @@ import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.inference.action.GetInferenceModelAction; -import org.elasticsearch.xpack.inference.DefaultElserFeatureFlag; -import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Set; @@ -69,11 +66,6 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient @Override public Set supportedCapabilities() { - Set capabilities = new HashSet<>(); - if (DefaultElserFeatureFlag.isEnabled()) { - capabilities.add(DEFAULT_ELSER_2_CAPABILITY); - } - - return Collections.unmodifiableSet(capabilities); + return Set.of(DEFAULT_ELSER_2_CAPABILITY); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestPutInferenceModelAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestPutInferenceModelAction.java index 0523160ee19c2..655e11996d522 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestPutInferenceModelAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestPutInferenceModelAction.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.inference.rest; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.BaseRestHandler; @@ -48,12 +49,12 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient taskType = TaskType.ANY; // task type must be defined in the body } - var request = new PutInferenceModelAction.Request( - taskType, - inferenceEntityId, - restRequest.requiredContent(), - restRequest.getXContentType() + var content = restRequest.requiredContent(); + var request = new PutInferenceModelAction.Request(taskType, inferenceEntityId, content, restRequest.getXContentType()); + return channel -> client.execute( + PutInferenceModelAction.INSTANCE, + request, + ActionListener.withRef(new RestToXContentListener<>(channel), content) ); - return channel -> client.execute(PutInferenceModelAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestUpdateInferenceModelAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestUpdateInferenceModelAction.java index 9405a6752538c..120731a4f8e66 100644 --- 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestUpdateInferenceModelAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestUpdateInferenceModelAction.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.inference.rest; import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.BaseRestHandler; @@ -50,13 +51,18 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient throw new ElasticsearchStatusException("Inference ID must be provided in the path", RestStatus.BAD_REQUEST); } + var content = restRequest.requiredContent(); var request = new UpdateInferenceModelAction.Request( inferenceEntityId, - restRequest.requiredContent(), + content, restRequest.getXContentType(), taskType, RestUtils.getMasterNodeTimeout(restRequest) ); - return channel -> client.execute(UpdateInferenceModelAction.INSTANCE, request, new RestToXContentListener<>(channel)); + return channel -> client.execute( + UpdateInferenceModelAction.INSTANCE, + request, + ActionListener.withRef(new RestToXContentListener<>(channel), content) + ); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java index b8a99227cf517..8e2dac1ef9db2 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java @@ -12,7 +12,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.InferenceService; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; @@ -76,13 +75,12 @@ public void chunkedInfer( List input, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { init(); // a non-null query is not supported and is dropped by all providers - doChunkedInfer(model, new DocumentsOnlyInput(input), taskSettings, inputType, chunkingOptions, timeout, listener); + doChunkedInfer(model, new DocumentsOnlyInput(input), taskSettings, inputType, timeout, listener); } protected abstract void doInfer( @@ -99,7 +97,6 @@ protected abstract void doChunkedInfer( DocumentsOnlyInput inputs, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchService.java index c84b4314b9d1a..d7ac7caed7efc 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchService.java @@ -15,10 +15,8 @@ import org.elasticsearch.core.Nullable; import 
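// Reviewer note: both REST handler changes above (put and update) apply the same fix.
// restRequest.requiredContent() returns ref-counted bytes, and wrapping the channel listener with
// ActionListener.withRef(listener, content) ties the content's release to listener completion, so
// the request body cannot be recycled while the action is still reading from it:
//
//   var content = restRequest.requiredContent();
//   return channel -> client.execute(ACTION, request,
//       ActionListener.withRef(new RestToXContentListener<>(channel), content));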
org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptySettingsConfiguration; -import org.elasticsearch.inference.InferenceService; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; @@ -51,6 +49,7 @@ import org.elasticsearch.xpack.inference.services.alibabacloudsearch.sparse.AlibabaCloudSearchSparseModel; import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import org.elasticsearch.xpack.inference.services.validation.ModelValidatorBuilder; import java.util.EnumSet; import java.util.HashMap; @@ -60,7 +59,6 @@ import static org.elasticsearch.inference.TaskType.SPARSE_EMBEDDING; import static org.elasticsearch.inference.TaskType.TEXT_EMBEDDING; -import static org.elasticsearch.xpack.core.inference.action.InferenceAction.Request.DEFAULT_TIMEOUT; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createInvalidModelException; import static org.elasticsearch.xpack.inference.services.ServiceUtils.parsePersistedConfigErrorMsg; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMap; @@ -290,7 +288,6 @@ protected void doChunkedInfer( DocumentsOnlyInput inputs, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { @@ -332,68 +329,39 @@ private EmbeddingRequestChunker.EmbeddingType getEmbeddingTypeFromTaskType(TaskT */ @Override public void checkModelConfig(Model model, ActionListener listener) { + // TODO: Remove this function once all services have been updated to use the new model validators + ModelValidatorBuilder.buildModelValidator(model.getTaskType()).validate(this, model, listener); + } + + @Override + public Model updateModelWithEmbeddingDetails(Model model, int embeddingSize) { if (model instanceof AlibabaCloudSearchEmbeddingsModel embeddingsModel) { - ServiceUtils.getEmbeddingSize( - model, - this, - listener.delegateFailureAndWrap((l, size) -> l.onResponse(updateModelWithEmbeddingDetails(embeddingsModel, size))) + var serviceSettings = embeddingsModel.getServiceSettings(); + + var updatedServiceSettings = new AlibabaCloudSearchEmbeddingsServiceSettings( + new AlibabaCloudSearchServiceSettings( + serviceSettings.getCommonSettings().modelId(), + serviceSettings.getCommonSettings().getHost(), + serviceSettings.getCommonSettings().getWorkspaceName(), + serviceSettings.getCommonSettings().getHttpSchema(), + serviceSettings.getCommonSettings().rateLimitSettings() + ), + SimilarityMeasure.DOT_PRODUCT, + embeddingSize, + serviceSettings.getMaxInputTokens() ); + + return new AlibabaCloudSearchEmbeddingsModel(embeddingsModel, updatedServiceSettings); } else { - checkAlibabaCloudSearchServiceConfig(model, this, listener); + throw ServiceUtils.invalidModelTypeForUpdateModelWithEmbeddingDetails(model.getClass()); } } - private AlibabaCloudSearchEmbeddingsModel updateModelWithEmbeddingDetails(AlibabaCloudSearchEmbeddingsModel model, int embeddingSize) { - AlibabaCloudSearchEmbeddingsServiceSettings serviceSettings = new AlibabaCloudSearchEmbeddingsServiceSettings( - new AlibabaCloudSearchServiceSettings( - model.getServiceSettings().getCommonSettings().modelId(), - 
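// Reviewer note: this service and AmazonBedrockService below follow the same migration recipe.
// checkModelConfig() collapses to a delegation that the TODO marks for eventual removal:
//
//   ModelValidatorBuilder.buildModelValidator(model.getTaskType()).validate(this, model, listener);
//
// while the embedding-size bookkeeping it used to perform moves into an
// updateModelWithEmbeddingDetails() override that rebuilds the service settings with the
// discovered dimension count.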
model.getServiceSettings().getCommonSettings().getHost(), - model.getServiceSettings().getCommonSettings().getWorkspaceName(), - model.getServiceSettings().getCommonSettings().getHttpSchema(), - model.getServiceSettings().getCommonSettings().rateLimitSettings() - ), - SimilarityMeasure.DOT_PRODUCT, - embeddingSize, - model.getServiceSettings().getMaxInputTokens() - ); - - return new AlibabaCloudSearchEmbeddingsModel(model, serviceSettings); - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ML_INFERENCE_ALIBABACLOUD_SEARCH_ADDED; } - /** - * For other models except of text embedding - * check the model's service settings and task settings - * - * @param model The new model - * @param service The inferenceService - * @param listener The listener - */ - private void checkAlibabaCloudSearchServiceConfig(Model model, InferenceService service, ActionListener listener) { - String input = ALIBABA_CLOUD_SEARCH_SERVICE_CONFIG_INPUT; - String query = model.getTaskType().equals(TaskType.RERANK) ? ALIBABA_CLOUD_SEARCH_SERVICE_CONFIG_QUERY : null; - - service.infer( - model, - query, - List.of(input), - false, - Map.of(), - InputType.INGEST, - DEFAULT_TIMEOUT, - listener.delegateFailureAndWrap((delegate, r) -> { - listener.onResponse(model); - }) - ); - } - - private static final String ALIBABA_CLOUD_SEARCH_SERVICE_CONFIG_INPUT = "input"; - private static final String ALIBABA_CLOUD_SEARCH_SERVICE_CONFIG_QUERY = "query"; - public static class Configuration { public static InferenceServiceConfiguration get() { return configuration.getOrCompute(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java index f9822c7ab4af9..48b3c3df03e11 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockService.java @@ -17,7 +17,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; @@ -49,6 +48,7 @@ import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModel; import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsServiceSettings; import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import org.elasticsearch.xpack.inference.services.validation.ModelValidatorBuilder; import java.io.IOException; import java.util.EnumSet; @@ -113,7 +113,6 @@ protected void doChunkedInfer( DocumentsOnlyInput inputs, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { @@ -303,49 +302,34 @@ public Set supportedStreamingTasks() { */ @Override public void checkModelConfig(Model model, ActionListener listener) { - if (model instanceof AmazonBedrockEmbeddingsModel embeddingsModel) { - ServiceUtils.getEmbeddingSize( - model, - this, - listener.delegateFailureAndWrap((l, size) -> 
l.onResponse(updateModelWithEmbeddingDetails(embeddingsModel, size))) - ); - } else { - listener.onResponse(model); - } + // TODO: Remove this function once all services have been updated to use the new model validators + ModelValidatorBuilder.buildModelValidator(model.getTaskType()).validate(this, model, listener); } - private AmazonBedrockEmbeddingsModel updateModelWithEmbeddingDetails(AmazonBedrockEmbeddingsModel model, int embeddingSize) { - AmazonBedrockEmbeddingsServiceSettings serviceSettings = model.getServiceSettings(); - if (serviceSettings.dimensionsSetByUser() - && serviceSettings.dimensions() != null - && serviceSettings.dimensions() != embeddingSize) { - throw new ElasticsearchStatusException( - Strings.format( - "The retrieved embeddings size [%s] does not match the size specified in the settings [%s]. " - + "Please recreate the [%s] configuration with the correct dimensions", - embeddingSize, - serviceSettings.dimensions(), - model.getConfigurations().getInferenceEntityId() - ), - RestStatus.BAD_REQUEST + @Override + public Model updateModelWithEmbeddingDetails(Model model, int embeddingSize) { + if (model instanceof AmazonBedrockEmbeddingsModel embeddingsModel) { + var serviceSettings = embeddingsModel.getServiceSettings(); + var similarityFromModel = serviceSettings.similarity(); + var similarityToUse = similarityFromModel == null + ? getProviderDefaultSimilarityMeasure(embeddingsModel.provider()) + : similarityFromModel; + + var updatedServiceSettings = new AmazonBedrockEmbeddingsServiceSettings( + serviceSettings.region(), + serviceSettings.modelId(), + serviceSettings.provider(), + embeddingSize, + serviceSettings.dimensionsSetByUser(), + serviceSettings.maxInputTokens(), + similarityToUse, + serviceSettings.rateLimitSettings() ); - } - - var similarityFromModel = serviceSettings.similarity(); - var similarityToUse = similarityFromModel == null ? 
getProviderDefaultSimilarityMeasure(model.provider()) : similarityFromModel; - - AmazonBedrockEmbeddingsServiceSettings settingsToUse = new AmazonBedrockEmbeddingsServiceSettings( - serviceSettings.region(), - serviceSettings.modelId(), - serviceSettings.provider(), - embeddingSize, - serviceSettings.dimensionsSetByUser(), - serviceSettings.maxInputTokens(), - similarityToUse, - serviceSettings.rateLimitSettings() - ); - return new AmazonBedrockEmbeddingsModel(model, settingsToUse); + return new AmazonBedrockEmbeddingsModel(embeddingsModel, updatedServiceSettings); + } else { + throw ServiceUtils.invalidModelTypeForUpdateModelWithEmbeddingDetails(model.getClass()); + } } private static void checkProviderForTask(TaskType taskType, AmazonBedrockProvider provider) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicService.java index 556b34b945c14..b3d503de8e3eb 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicService.java @@ -15,7 +15,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; @@ -39,6 +38,7 @@ import org.elasticsearch.xpack.inference.services.anthropic.completion.AnthropicChatCompletionModel; import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import org.elasticsearch.xpack.inference.services.validation.ModelValidatorBuilder; import java.util.EnumSet; import java.util.HashMap; @@ -176,6 +176,12 @@ public AnthropicModel parsePersistedConfig(String inferenceEntityId, TaskType ta ); } + @Override + public void checkModelConfig(Model model, ActionListener listener) { + // TODO: Remove this function once all services have been updated to use the new model validators + ModelValidatorBuilder.buildModelValidator(model.getTaskType()).validate(this, model, listener); + } + @Override public InferenceServiceConfiguration getConfiguration() { return Configuration.get(); @@ -213,7 +219,6 @@ protected void doChunkedInfer( DocumentsOnlyInput inputs, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java index a2f8dc409585e..bba331fc0b5df 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioService.java @@ -16,7 +16,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import 
org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; @@ -107,7 +106,6 @@ protected void doChunkedInfer( DocumentsOnlyInput inputs, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java index 6d36e5f6c8fe7..16c94dfa9ad94 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiService.java @@ -11,12 +11,10 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; @@ -46,6 +44,7 @@ import org.elasticsearch.xpack.inference.services.azureopenai.embeddings.AzureOpenAiEmbeddingsModel; import org.elasticsearch.xpack.inference.services.azureopenai.embeddings.AzureOpenAiEmbeddingsServiceSettings; import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import org.elasticsearch.xpack.inference.services.validation.ModelValidatorBuilder; import java.util.EnumSet; import java.util.HashMap; @@ -261,7 +260,6 @@ protected void doChunkedInfer( DocumentsOnlyInput inputs, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { @@ -294,48 +292,32 @@ protected void doChunkedInfer( */ @Override public void checkModelConfig(Model model, ActionListener listener) { - if (model instanceof AzureOpenAiEmbeddingsModel embeddingsModel) { - ServiceUtils.getEmbeddingSize( - model, - this, - listener.delegateFailureAndWrap((l, size) -> l.onResponse(updateModelWithEmbeddingDetails(embeddingsModel, size))) - ); - } else { - listener.onResponse(model); - } + // TODO: Remove this function once all services have been updated to use the new model validators + ModelValidatorBuilder.buildModelValidator(model.getTaskType()).validate(this, model, listener); } - private AzureOpenAiEmbeddingsModel updateModelWithEmbeddingDetails(AzureOpenAiEmbeddingsModel model, int embeddingSize) { - if (model.getServiceSettings().dimensionsSetByUser() - && model.getServiceSettings().dimensions() != null - && model.getServiceSettings().dimensions() != embeddingSize) { - throw new ElasticsearchStatusException( - Strings.format( - "The retrieved embeddings size [%s] does not match the size specified in the settings [%s]. 
" - + "Please recreate the [%s] configuration with the correct dimensions", - embeddingSize, - model.getServiceSettings().dimensions(), - model.getConfigurations().getInferenceEntityId() - ), - RestStatus.BAD_REQUEST + @Override + public Model updateModelWithEmbeddingDetails(Model model, int embeddingSize) { + if (model instanceof AzureOpenAiEmbeddingsModel embeddingsModel) { + var serviceSettings = embeddingsModel.getServiceSettings(); + var similarityFromModel = serviceSettings.similarity(); + var similarityToUse = similarityFromModel == null ? SimilarityMeasure.DOT_PRODUCT : similarityFromModel; + + var updatedServiceSettings = new AzureOpenAiEmbeddingsServiceSettings( + serviceSettings.resourceName(), + serviceSettings.deploymentId(), + serviceSettings.apiVersion(), + embeddingSize, + serviceSettings.dimensionsSetByUser(), + serviceSettings.maxInputTokens(), + similarityToUse, + serviceSettings.rateLimitSettings() ); - } - - var similarityFromModel = model.getServiceSettings().similarity(); - var similarityToUse = similarityFromModel == null ? SimilarityMeasure.DOT_PRODUCT : similarityFromModel; - - AzureOpenAiEmbeddingsServiceSettings serviceSettings = new AzureOpenAiEmbeddingsServiceSettings( - model.getServiceSettings().resourceName(), - model.getServiceSettings().deploymentId(), - model.getServiceSettings().apiVersion(), - embeddingSize, - model.getServiceSettings().dimensionsSetByUser(), - model.getServiceSettings().maxInputTokens(), - similarityToUse, - model.getServiceSettings().rateLimitSettings() - ); - return new AzureOpenAiEmbeddingsModel(model, serviceSettings); + return new AzureOpenAiEmbeddingsModel(embeddingsModel, updatedServiceSettings); + } else { + throw ServiceUtils.invalidModelTypeForUpdateModelWithEmbeddingDetails(model.getClass()); + } } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java index de1d055e160da..b3d8b3b6efce3 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereService.java @@ -15,7 +15,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; @@ -45,6 +44,7 @@ import org.elasticsearch.xpack.inference.services.cohere.rerank.CohereRerankModel; import org.elasticsearch.xpack.inference.services.settings.DefaultSecretSettings; import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import org.elasticsearch.xpack.inference.services.validation.ModelValidatorBuilder; import java.util.EnumSet; import java.util.HashMap; @@ -259,7 +259,6 @@ protected void doChunkedInfer( DocumentsOnlyInput inputs, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { @@ -293,36 +292,35 @@ protected void doChunkedInfer( */ @Override public void checkModelConfig(Model model, ActionListener listener) { + // TODO: Remove this function once all services have been updated to use the new model validators + 
ModelValidatorBuilder.buildModelValidator(model.getTaskType()).validate(this, model, listener); + } + + @Override + public Model updateModelWithEmbeddingDetails(Model model, int embeddingSize) { if (model instanceof CohereEmbeddingsModel embeddingsModel) { - ServiceUtils.getEmbeddingSize( - model, - this, - listener.delegateFailureAndWrap((l, size) -> l.onResponse(updateModelWithEmbeddingDetails(embeddingsModel, size))) + var serviceSettings = embeddingsModel.getServiceSettings(); + var similarityFromModel = serviceSettings.similarity(); + var similarityToUse = similarityFromModel == null ? defaultSimilarity() : similarityFromModel; + + var updatedServiceSettings = new CohereEmbeddingsServiceSettings( + new CohereServiceSettings( + serviceSettings.getCommonSettings().uri(), + similarityToUse, + embeddingSize, + serviceSettings.getCommonSettings().maxInputTokens(), + serviceSettings.getCommonSettings().modelId(), + serviceSettings.getCommonSettings().rateLimitSettings() + ), + serviceSettings.getEmbeddingType() ); + + return new CohereEmbeddingsModel(embeddingsModel, updatedServiceSettings); } else { - listener.onResponse(model); + throw ServiceUtils.invalidModelTypeForUpdateModelWithEmbeddingDetails(model.getClass()); } } - private CohereEmbeddingsModel updateModelWithEmbeddingDetails(CohereEmbeddingsModel model, int embeddingSize) { - var userDefinedSimilarity = model.getServiceSettings().similarity(); - var similarityToUse = userDefinedSimilarity == null ? defaultSimilarity() : userDefinedSimilarity; - - CohereEmbeddingsServiceSettings serviceSettings = new CohereEmbeddingsServiceSettings( - new CohereServiceSettings( - model.getServiceSettings().getCommonSettings().uri(), - similarityToUse, - embeddingSize, - model.getServiceSettings().getCommonSettings().maxInputTokens(), - model.getServiceSettings().getCommonSettings().modelId(), - model.getServiceSettings().getCommonSettings().rateLimitSettings() - ), - model.getServiceSettings().getEmbeddingType() - ); - - return new CohereEmbeddingsModel(model, serviceSettings); - } - /** * Return the default similarity measure for the embedding type. 
* Cohere embeddings are normalized to unit vectors therefore Dot diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java index 98429ed3d001d..1f08c06edaa91 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java @@ -16,7 +16,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; @@ -62,6 +61,7 @@ public class ElasticInferenceService extends SenderService { public static final String NAME = "elastic"; + public static final String ELASTIC_INFERENCE_SERVICE_IDENTIFIER = "Elastic Inference Service"; private final ElasticInferenceServiceComponents elasticInferenceServiceComponents; @@ -70,10 +70,10 @@ public class ElasticInferenceService extends SenderService { public ElasticInferenceService( HttpRequestSender.Factory factory, ServiceComponents serviceComponents, - ElasticInferenceServiceComponents eisComponents + ElasticInferenceServiceComponents elasticInferenceServiceComponents ) { super(factory, serviceComponents); - this.elasticInferenceServiceComponents = eisComponents; + this.elasticInferenceServiceComponents = elasticInferenceServiceComponents; } @Override @@ -108,7 +108,6 @@ protected void doChunkedInfer( DocumentsOnlyInput inputs, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceComponents.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceComponents.java index 4386964e927d2..c5b2cb693df13 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceComponents.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceComponents.java @@ -7,4 +7,4 @@ package org.elasticsearch.xpack.inference.services.elastic; -public record ElasticInferenceServiceComponents(String eisGatewayUrl) {} +public record ElasticInferenceServiceComponents(String elasticInferenceServiceUrl) {}
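A minimal wiring sketch for the renamed record (illustrative only; the plugin's actual bootstrap is outside this diff, and senderFactory/serviceComponents are stand-ins for the real dependencies, with a placeholder URL):

    // Hypothetical set-up: the components record now spells out the full service
    // name instead of "eis", and the service constructor parameter matches it.
    var components = new ElasticInferenceServiceComponents("https://eis.example.invalid");
    var service = new ElasticInferenceService(senderFactory, serviceComponents, components);

diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceFeature.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceFeature.java index b0fb6d14ee6f7..324c20d0e48bf 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceFeature.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceFeature.java @@ -15,6 +15,8 @@ */ public class ElasticInferenceServiceFeature { - public static final FeatureFlag ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG = new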
FeatureFlag("eis"); + @Deprecated + public static final FeatureFlag DEPRECATED_ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG = new FeatureFlag("eis"); + public static final FeatureFlag ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG = new FeatureFlag("elastic_inference_service"); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSettings.java index 8525710c6cf23..bc2daddc2a346 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSettings.java @@ -14,20 +14,37 @@ public class ElasticInferenceServiceSettings { + @Deprecated static final Setting EIS_GATEWAY_URL = Setting.simpleString("xpack.inference.eis.gateway.url", Setting.Property.NodeScope); + static final Setting ELASTIC_INFERENCE_SERVICE_URL = Setting.simpleString( + "xpack.inference.elastic.url", + Setting.Property.NodeScope + ); + // Adjust this variable to be volatile, if the setting can be updated at some point in time + @Deprecated private final String eisGatewayUrl; + private final String elasticInferenceServiceUrl; + public ElasticInferenceServiceSettings(Settings settings) { eisGatewayUrl = EIS_GATEWAY_URL.get(settings); + elasticInferenceServiceUrl = ELASTIC_INFERENCE_SERVICE_URL.get(settings); + } public static List> getSettingsDefinitions() { - return List.of(EIS_GATEWAY_URL); + return List.of(EIS_GATEWAY_URL, ELASTIC_INFERENCE_SERVICE_URL); } + @Deprecated public String getEisGatewayUrl() { return eisGatewayUrl; } + + public String getElasticInferenceServiceUrl() { + return elasticInferenceServiceUrl; + } + } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSparseEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSparseEmbeddingsModel.java index bbbae736dbeb9..cc69df86933de 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSparseEmbeddingsModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSparseEmbeddingsModel.java @@ -22,8 +22,11 @@ import java.net.URI; import java.net.URISyntaxException; +import java.util.Locale; import java.util.Map; +import static org.elasticsearch.xpack.inference.services.elastic.ElasticInferenceService.ELASTIC_INFERENCE_SERVICE_IDENTIFIER; + public class ElasticInferenceServiceSparseEmbeddingsModel extends ElasticInferenceServiceModel { private final URI uri; @@ -105,9 +108,13 @@ private URI createUri() throws URISyntaxException { switch (modelId) { case ElserModels.ELSER_V2_MODEL -> modelIdUriPath = "ELSERv2"; - default -> throw new IllegalArgumentException("Unsupported model for EIS [" + modelId + "]"); + default -> throw new IllegalArgumentException( + String.format(Locale.ROOT, "Unsupported model for %s [%s]", ELASTIC_INFERENCE_SERVICE_IDENTIFIER, modelId) + ); } - return new URI(elasticInferenceServiceComponents().eisGatewayUrl() + "/sparse-text-embedding/" + modelIdUriPath); + return new URI( + elasticInferenceServiceComponents().elasticInferenceServiceUrl() + "/api/v1/sparse-text-embedding/" + 
modelIdUriPath + ); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/BaseElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/BaseElasticsearchInternalService.java index 922b366498c27..f743b94df3810 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/BaseElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/BaseElasticsearchInternalService.java @@ -35,7 +35,6 @@ import org.elasticsearch.xpack.core.ml.inference.TrainedModelPrefixStrings; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfigUpdate; import org.elasticsearch.xpack.core.ml.utils.MlPlatformArchitecturesUtil; -import org.elasticsearch.xpack.inference.DefaultElserFeatureFlag; import org.elasticsearch.xpack.inference.InferencePlugin; import java.io.IOException; @@ -157,6 +156,8 @@ protected void putModel(Model model, ActionListener listener) { putBuiltInModel(e5Model.getServiceSettings().modelId(), listener); } else if (model instanceof ElserInternalModel elserModel) { putBuiltInModel(elserModel.getServiceSettings().modelId(), listener); + } else if (model instanceof ElasticRerankerModel elasticRerankerModel) { + putBuiltInModel(elasticRerankerModel.getServiceSettings().modelId(), listener); } else if (model instanceof CustomElandModel) { logger.info("Custom eland model detected, model must have been already loaded into the cluster with eland."); listener.onResponse(Boolean.TRUE); @@ -296,11 +297,6 @@ protected void maybeStartDeployment( InferModelAction.Request request, ActionListener listener ) { - if (DefaultElserFeatureFlag.isEnabled() == false) { - listener.onFailure(e); - return; - } - if (isDefaultId(model.getInferenceEntityId()) && ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { this.start(model, request.getInferenceTimeout(), listener.delegateFailureAndWrap((l, started) -> { client.execute(InferModelAction.INSTANCE, request, listener); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandModel.java index b710b24cbda31..b76de5eeedbfc 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandModel.java @@ -7,14 +7,9 @@ package org.elasticsearch.xpack.inference.services.elasticsearch; -import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.ChunkingSettings; -import org.elasticsearch.inference.Model; import org.elasticsearch.inference.TaskSettings; import org.elasticsearch.inference.TaskType; -import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; -import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; public class CustomElandModel extends ElasticsearchInternalModel { @@ -39,31 +34,10 @@ public CustomElandModel( } @Override - public ActionListener getCreateTrainedModelAssignmentActionListener( - Model model, - ActionListener listener - ) { - - return new ActionListener<>() { - @Override - public void 
onResponse(CreateTrainedModelAssignmentAction.Response response) { - listener.onResponse(Boolean.TRUE); - } - - @Override - public void onFailure(Exception e) { - if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { - listener.onFailure( - new ResourceNotFoundException( - "Could not start the inference as the custom eland model [{0}] for this platform cannot be found." - + " Custom models need to be loaded into the cluster with eland before they can be started.", - internalServiceSettings.modelId() - ) - ); - return; - } - listener.onFailure(e); - } - }; + protected String modelNotFoundErrorMessage(String modelId) { + return "Could not deploy model [" + + modelId + + "] as the model cannot be found." + + " Custom models need to be loaded into the cluster with Eland before they can be started."; } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticDeployedModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticDeployedModel.java index 724c7a8f0a166..ce6c6258d0393 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticDeployedModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticDeployedModel.java @@ -36,6 +36,11 @@ public StartTrainedModelDeploymentAction.Request getStartTrainedModelDeploymentA throw new IllegalStateException("cannot start model that uses an existing deployment"); } + @Override + protected String modelNotFoundErrorMessage(String modelId) { + throw new IllegalStateException("cannot start model [" + modelId + "] that uses an existing deployment"); + } + @Override public ActionListener getCreateTrainedModelAssignmentActionListener( Model model, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticRerankerModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticRerankerModel.java new file mode 100644 index 0000000000000..115cc9f05599a --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticRerankerModel.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.elasticsearch; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +public class ElasticRerankerModel extends ElasticsearchInternalModel { + + public ElasticRerankerModel( + String inferenceEntityId, + TaskType taskType, + String service, + ElasticRerankerServiceSettings serviceSettings, + ChunkingSettings chunkingSettings + ) { + super(inferenceEntityId, taskType, service, serviceSettings, chunkingSettings); + } + + @Override + public ElasticRerankerServiceSettings getServiceSettings() { + return (ElasticRerankerServiceSettings) super.getServiceSettings(); + } + + @Override + public ActionListener getCreateTrainedModelAssignmentActionListener( + Model model, + ActionListener listener + ) { + + return new ActionListener<>() { + @Override + public void onResponse(CreateTrainedModelAssignmentAction.Response response) { + listener.onResponse(Boolean.TRUE); + } + + @Override + public void onFailure(Exception e) { + if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { + listener.onFailure( + new ResourceNotFoundException("Could not start the Elastic Reranker Endpoint due to [{}]", e, e.getMessage()) + ); + return; + } + listener.onFailure(e); + } + }; + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticRerankerServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticRerankerServiceSettings.java new file mode 100644 index 0000000000000..316dc092e03c7 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticRerankerServiceSettings.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.services.elasticsearch; + +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; + +import java.io.IOException; +import java.util.Map; + +public class ElasticRerankerServiceSettings extends ElasticsearchInternalServiceSettings { + + public static final String NAME = "elastic_reranker_service_settings"; + + public ElasticRerankerServiceSettings(ElasticsearchInternalServiceSettings other) { + super(other); + } + + public ElasticRerankerServiceSettings( + Integer numAllocations, + int numThreads, + String modelId, + AdaptiveAllocationsSettings adaptiveAllocationsSettings + ) { + super(numAllocations, numThreads, modelId, adaptiveAllocationsSettings); + } + + public ElasticRerankerServiceSettings(StreamInput in) throws IOException { + super(in); + } + + /** + * Parse the ElasticRerankerServiceSettings from a map and validate the setting values. + * + * If required settings are missing or the values are invalid, a + * {@link ValidationException} is thrown.
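+ *
+ * <p>A usage sketch (illustrative only; {@code serviceSettingsMap} is a stand-in for the
+ * parsed request map, and the same pattern appears in ElasticsearchInternalService in this change):
+ * <pre>{@code
+ * var builder = ElasticRerankerServiceSettings.fromRequestMap(serviceSettingsMap);
+ * var settings = new ElasticRerankerServiceSettings(builder.build());
+ * }</pre>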
+ * + * @param map Source map containing the config + * @return The builder + */ + public static Builder fromRequestMap(Map map) { + ValidationException validationException = new ValidationException(); + var baseSettings = ElasticsearchInternalServiceSettings.fromMap(map, validationException); + + if (validationException.validationErrors().isEmpty() == false) { + throw validationException; + } + + return baseSettings; + } + + @Override + public String getWriteableName() { + return ElasticRerankerServiceSettings.NAME; + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java index 2405243f302bc..aa12bf0c645c3 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java @@ -7,6 +7,9 @@ package org.elasticsearch.xpack.inference.services.elasticsearch; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.Strings; import org.elasticsearch.core.TimeValue; @@ -15,8 +18,10 @@ import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.TaskSettings; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import static org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus.State.STARTED; @@ -79,10 +84,38 @@ public StartTrainedModelDeploymentAction.Request getStartTrainedModelDeploymentA return startRequest; } - public abstract ActionListener getCreateTrainedModelAssignmentActionListener( + public ActionListener getCreateTrainedModelAssignmentActionListener( Model model, ActionListener listener - ); + ) { + return new ActionListener<>() { + @Override + public void onResponse(CreateTrainedModelAssignmentAction.Response response) { + listener.onResponse(Boolean.TRUE); + } + + @Override + public void onFailure(Exception e) { + var cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof ResourceNotFoundException) { + listener.onFailure(new ResourceNotFoundException(modelNotFoundErrorMessage(internalServiceSettings.modelId()))); + return; + } else if (cause instanceof ElasticsearchStatusException statusException) { + if (statusException.status() == RestStatus.CONFLICT + && statusException.getRootCause() instanceof ResourceAlreadyExistsException) { + // Deployment is already started + listener.onResponse(Boolean.TRUE); + } else { + // Not an "already started" conflict: the listener must still be completed with the failure + listener.onFailure(e); + } + return; + } + listener.onFailure(e); + } + }; + } + + protected String modelNotFoundErrorMessage(String modelId) { + return "Could not deploy model [" + modelId + "] as the model cannot be found."; + } public boolean usesExistingDeployment() { return internalServiceSettings.getDeploymentId() != null;
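With the assignment listener now implemented once in the base class, a subclass only supplies its own not-found message. A minimal sketch (hypothetical subclass, for illustration; the real subclasses in this change are CustomElandModel and ElasticRerankerModel):

    public class MyInternalModel extends ElasticsearchInternalModel {
        MyInternalModel(
            String inferenceEntityId,
            TaskType taskType,
            String service,
            ElasticsearchInternalServiceSettings serviceSettings,
            ChunkingSettings chunkingSettings
        ) {
            super(inferenceEntityId, taskType, service, serviceSettings, chunkingSettings);
        }

        // Conflict ("already started") and not-found handling are inherited;
        // only the error message is customized.
        @Override
        protected String modelNotFoundErrorMessage(String modelId) {
            return "Could not deploy model [" + modelId + "] as the model cannot be found.";
        }
    }

diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java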
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index fe83acc8574aa..2ec3a9d629434 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -19,7 +19,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceResults; @@ -97,6 +96,8 @@ public class ElasticsearchInternalService extends BaseElasticsearchInternalServi MULTILINGUAL_E5_SMALL_MODEL_ID_LINUX_X86 ); + public static final String RERANKER_ID = ".rerank-v1"; + public static final int EMBEDDING_MAX_BATCH_SIZE = 10; public static final String DEFAULT_ELSER_ID = ".elser-2-elasticsearch"; public static final String DEFAULT_E5_ID = ".multilingual-e5-small-elasticsearch"; @@ -223,6 +224,8 @@ public void parseRequestConfig( ) ) ); + } else if (RERANKER_ID.equals(modelId)) { + rerankerCase(inferenceEntityId, taskType, config, serviceSettingsMap, chunkingSettings, modelListener); } else { customElandCase(inferenceEntityId, taskType, serviceSettingsMap, taskSettingsMap, chunkingSettings, modelListener); } @@ -323,6 +326,31 @@ private static CustomElandInternalServiceSettings elandServiceSettings( }; } + private void rerankerCase( + String inferenceEntityId, + TaskType taskType, + Map config, + Map serviceSettingsMap, + ChunkingSettings chunkingSettings, + ActionListener modelListener + ) { + + var esServiceSettingsBuilder = ElasticsearchInternalServiceSettings.fromRequestMap(serviceSettingsMap); + + throwIfNotEmptyMap(config, name()); + throwIfNotEmptyMap(serviceSettingsMap, name()); + + modelListener.onResponse( + new ElasticRerankerModel( + inferenceEntityId, + taskType, + NAME, + new ElasticRerankerServiceSettings(esServiceSettingsBuilder.build()), + chunkingSettings + ) + ); + } + private void e5Case( String inferenceEntityId, TaskType taskType, @@ -647,11 +675,10 @@ public void chunkedInfer( List input, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { - chunkedInfer(model, null, input, taskSettings, inputType, chunkingOptions, timeout, listener); + chunkedInfer(model, null, input, taskSettings, inputType, timeout, listener); } @Override @@ -661,7 +688,6 @@ public void chunkedInfer( List input, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { @@ -833,6 +859,7 @@ public void updateModelsWithDynamicFields(List models, ActionListener> defaultsListener) { preferredModelVariantFn.accept(defaultsListener.delegateFailureAndWrap((delegate, preferredModelVariant) -> { if (PreferredModelVariant.LINUX_X86_OPTIMIZED.equals(preferredModelVariant)) { @@ -863,7 +890,7 @@ private List defaultConfigs(boolean useLinuxOptimizedModel) { new AdaptiveAllocationsSettings(Boolean.TRUE, 0, 32) ), ElserMlNodeTaskSettings.DEFAULT, - null // default chunking settings + ChunkingSettingsBuilder.DEFAULT_SETTINGS ); var defaultE5 = new MultilingualE5SmallModel( DEFAULT_E5_ID, @@ -875,7 +902,7 @@ private List 
defaultConfigs(boolean useLinuxOptimizedModel) { useLinuxOptimizedModel ? MULTILINGUAL_E5_SMALL_MODEL_ID_LINUX_X86 : MULTILINGUAL_E5_SMALL_MODEL_ID, new AdaptiveAllocationsSettings(Boolean.TRUE, 0, 32) ), - null // default chunking settings + ChunkingSettingsBuilder.DEFAULT_SETTINGS ); return List.of(defaultElser, defaultE5); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElserInternalModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElserInternalModel.java index 8d2f59171a601..2594f18db3fb5 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElserInternalModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElserInternalModel.java @@ -7,13 +7,8 @@ package org.elasticsearch.xpack.inference.services.elasticsearch; -import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.ChunkingSettings; -import org.elasticsearch.inference.Model; import org.elasticsearch.inference.TaskType; -import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; -import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; public class ElserInternalModel extends ElasticsearchInternalModel { @@ -37,31 +32,4 @@ public ElserInternalServiceSettings getServiceSettings() { public ElserMlNodeTaskSettings getTaskSettings() { return (ElserMlNodeTaskSettings) super.getTaskSettings(); } - - @Override - public ActionListener getCreateTrainedModelAssignmentActionListener( - Model model, - ActionListener listener - ) { - return new ActionListener<>() { - @Override - public void onResponse(CreateTrainedModelAssignmentAction.Response response) { - listener.onResponse(Boolean.TRUE); - } - - @Override - public void onFailure(Exception e) { - if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { - listener.onFailure( - new ResourceNotFoundException( - "Could not start the ELSER service as the ELSER model for this platform cannot be found." - + " ELSER needs to be downloaded before it can be started." 
- ) - ); - return; - } - listener.onFailure(e); - } - }; - } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallModel.java index fee00d04d940b..2dcf91140c995 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/MultilingualE5SmallModel.java @@ -7,13 +7,8 @@ package org.elasticsearch.xpack.inference.services.elasticsearch; -import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.ChunkingSettings; -import org.elasticsearch.inference.Model; import org.elasticsearch.inference.TaskType; -import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; -import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; public class MultilingualE5SmallModel extends ElasticsearchInternalModel { @@ -31,34 +26,4 @@ public MultilingualE5SmallModel( public MultilingualE5SmallInternalServiceSettings getServiceSettings() { return (MultilingualE5SmallInternalServiceSettings) super.getServiceSettings(); } - - @Override - public ActionListener getCreateTrainedModelAssignmentActionListener( - Model model, - ActionListener listener - ) { - - return new ActionListener<>() { - @Override - public void onResponse(CreateTrainedModelAssignmentAction.Response response) { - listener.onResponse(Boolean.TRUE); - } - - @Override - public void onFailure(Exception e) { - if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { - listener.onFailure( - new ResourceNotFoundException( - "Could not start the TextEmbeddingService service as the " - + "Multilingual-E5-Small model for this platform cannot be found." 
- + " Multilingual-E5-Small needs to be downloaded before it can be started" - ) - ); - return; - } - listener.onFailure(e); - } - }; - } - } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioService.java index 1c01ebbe2c0e4..57a8a66a3f3a6 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioService.java @@ -15,7 +15,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; @@ -315,7 +314,6 @@ protected void doChunkedInfer( DocumentsOnlyInput inputs, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java index a05b1a937d376..857d475499aae 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java @@ -11,12 +11,10 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; @@ -45,6 +43,7 @@ import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsServiceSettings; import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankModel; import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import org.elasticsearch.xpack.inference.services.validation.ModelValidatorBuilder; import java.util.EnumSet; import java.util.HashMap; @@ -181,15 +180,8 @@ public TransportVersion getMinimalSupportedVersion() { @Override public void checkModelConfig(Model model, ActionListener listener) { - if (model instanceof GoogleVertexAiEmbeddingsModel embeddingsModel) { - ServiceUtils.getEmbeddingSize( - model, - this, - listener.delegateFailureAndWrap((l, size) -> l.onResponse(updateModelWithEmbeddingDetails(embeddingsModel, size))) - ); - } else { - listener.onResponse(model); - } + // TODO: Remove this function once all services have been updated to use the new model validators + 
ModelValidatorBuilder.buildModelValidator(model.getTaskType()).validate(this, model, listener); } @Override @@ -220,7 +212,6 @@ protected void doChunkedInfer( DocumentsOnlyInput inputs, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { @@ -240,34 +231,26 @@ protected void doChunkedInfer( } } - private GoogleVertexAiEmbeddingsModel updateModelWithEmbeddingDetails(GoogleVertexAiEmbeddingsModel model, int embeddingSize) { - if (model.getServiceSettings().dimensionsSetByUser() - && model.getServiceSettings().dimensions() != null - && model.getServiceSettings().dimensions() != embeddingSize) { - throw new ElasticsearchStatusException( - Strings.format( - "The retrieved embeddings size [%s] does not match the size specified in the settings [%s]. " - + "Please recreate the [%s] configuration with the correct dimensions", - embeddingSize, - model.getServiceSettings().dimensions(), - model.getConfigurations().getInferenceEntityId() - ), - RestStatus.BAD_REQUEST + @Override + public Model updateModelWithEmbeddingDetails(Model model, int embeddingSize) { + if (model instanceof GoogleVertexAiEmbeddingsModel embeddingsModel) { + var serviceSettings = embeddingsModel.getServiceSettings(); + + var updatedServiceSettings = new GoogleVertexAiEmbeddingsServiceSettings( + serviceSettings.location(), + serviceSettings.projectId(), + serviceSettings.modelId(), + serviceSettings.dimensionsSetByUser(), + serviceSettings.maxInputTokens(), + embeddingSize, + serviceSettings.similarity(), + serviceSettings.rateLimitSettings() ); - } - - GoogleVertexAiEmbeddingsServiceSettings serviceSettings = new GoogleVertexAiEmbeddingsServiceSettings( - model.getServiceSettings().location(), - model.getServiceSettings().projectId(), - model.getServiceSettings().modelId(), - model.getServiceSettings().dimensionsSetByUser(), - model.getServiceSettings().maxInputTokens(), - embeddingSize, - model.getServiceSettings().similarity(), - model.getServiceSettings().rateLimitSettings() - ); - return new GoogleVertexAiEmbeddingsModel(model, serviceSettings); + return new GoogleVertexAiEmbeddingsModel(embeddingsModel, updatedServiceSettings); + } else { + throw ServiceUtils.invalidModelTypeForUpdateModelWithEmbeddingDetails(model.getClass()); + } } private static GoogleVertexAiModel createModelFromPersistent( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java index eede14a975234..51cca72f26054 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceService.java @@ -15,7 +15,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; @@ -116,7 +115,6 @@ protected void doChunkedInfer( DocumentsOnlyInput inputs, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { diff --git 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java index a2e22e24172cf..75920efa251f2 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java @@ -16,7 +16,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; @@ -88,7 +87,6 @@ protected void doChunkedInfer( DocumentsOnlyInput inputs, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxService.java index e960b0b777f2b..ea263fb77a2da 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxService.java @@ -15,7 +15,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; @@ -30,6 +30,7 @@ import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.inference.chunking.ChunkingSettingsBuilder; import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; import org.elasticsearch.xpack.inference.external.action.ibmwatsonx.IbmWatsonxActionCreator; import org.elasticsearch.xpack.inference.external.http.sender.DocumentsOnlyInput; @@ -42,6 +43,7 @@ import org.elasticsearch.xpack.inference.services.ServiceUtils; import org.elasticsearch.xpack.inference.services.ibmwatsonx.embeddings.IbmWatsonxEmbeddingsModel; import org.elasticsearch.xpack.inference.services.ibmwatsonx.embeddings.IbmWatsonxEmbeddingsServiceSettings; +import org.elasticsearch.xpack.inference.services.validation.ModelValidatorBuilder; import java.util.EnumSet; import java.util.HashMap; @@ -86,11 +88,19 @@ public void parseRequestConfig( Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); Map taskSettingsMap = removeFromMapOrDefaultEmpty(config, ModelConfigurations.TASK_SETTINGS); + ChunkingSettings chunkingSettings = null; + if (TaskType.TEXT_EMBEDDING.equals(taskType)) { + chunkingSettings = ChunkingSettingsBuilder.fromMap( + 
removeFromMapOrDefaultEmpty(config, ModelConfigurations.CHUNKING_SETTINGS) + ); + } + IbmWatsonxModel model = createModel( inferenceEntityId, taskType, serviceSettingsMap, taskSettingsMap, + chunkingSettings, serviceSettingsMap, TaskType.unsupportedTaskTypeErrorMsg(taskType, NAME), ConfigurationParseContext.REQUEST @@ -112,6 +122,7 @@ private static IbmWatsonxModel createModel( TaskType taskType, Map serviceSettings, Map taskSettings, + ChunkingSettings chunkingSettings, @Nullable Map secretSettings, String failureMessage, ConfigurationParseContext context @@ -123,6 +134,7 @@ private static IbmWatsonxModel createModel( NAME, serviceSettings, taskSettings, + chunkingSettings, secretSettings, context ); @@ -141,11 +153,17 @@ public IbmWatsonxModel parsePersistedConfigWithSecrets( Map taskSettingsMap = removeFromMapOrDefaultEmpty(config, ModelConfigurations.TASK_SETTINGS); Map secretSettingsMap = removeFromMapOrDefaultEmpty(secrets, ModelSecrets.SECRET_SETTINGS); + ChunkingSettings chunkingSettings = null; + if (TaskType.TEXT_EMBEDDING.equals(taskType)) { + chunkingSettings = ChunkingSettingsBuilder.fromMap(removeFromMapOrDefaultEmpty(config, ModelConfigurations.CHUNKING_SETTINGS)); + } + return createModelFromPersistent( inferenceEntityId, taskType, serviceSettingsMap, taskSettingsMap, + chunkingSettings, secretSettingsMap, parsePersistedConfigErrorMsg(inferenceEntityId, NAME) ); @@ -166,6 +184,7 @@ private static IbmWatsonxModel createModelFromPersistent( TaskType taskType, Map serviceSettings, Map taskSettings, + ChunkingSettings chunkingSettings, Map secretSettings, String failureMessage ) { @@ -174,6 +193,7 @@ private static IbmWatsonxModel createModelFromPersistent( taskType, serviceSettings, taskSettings, + chunkingSettings, secretSettings, failureMessage, ConfigurationParseContext.PERSISTENT @@ -185,11 +205,17 @@ public Model parsePersistedConfig(String inferenceEntityId, TaskType taskType, M Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); Map taskSettingsMap = removeFromMapOrDefaultEmpty(config, ModelConfigurations.TASK_SETTINGS); + ChunkingSettings chunkingSettings = null; + if (TaskType.TEXT_EMBEDDING.equals(taskType)) { + chunkingSettings = ChunkingSettingsBuilder.fromMap(removeFromMapOrDefaultEmpty(config, ModelConfigurations.CHUNKING_SETTINGS)); + } + return createModelFromPersistent( inferenceEntityId, taskType, serviceSettingsMap, taskSettingsMap, + chunkingSettings, null, parsePersistedConfigErrorMsg(inferenceEntityId, NAME) ); @@ -202,35 +228,34 @@ public TransportVersion getMinimalSupportedVersion() { @Override public void checkModelConfig(Model model, ActionListener listener) { + // TODO: Remove this function once all services have been updated to use the new model validators + ModelValidatorBuilder.buildModelValidator(model.getTaskType()).validate(this, model, listener); + } + + @Override + public Model updateModelWithEmbeddingDetails(Model model, int embeddingSize) { if (model instanceof IbmWatsonxEmbeddingsModel embeddingsModel) { - ServiceUtils.getEmbeddingSize( - model, - this, - listener.delegateFailureAndWrap((l, size) -> l.onResponse(updateModelWithEmbeddingDetails(embeddingsModel, size))) + var serviceSettings = embeddingsModel.getServiceSettings(); + var similarityFromModel = serviceSettings.similarity(); + var similarityToUse = similarityFromModel == null ? 
SimilarityMeasure.DOT_PRODUCT : similarityFromModel; + + var updatedServiceSettings = new IbmWatsonxEmbeddingsServiceSettings( + serviceSettings.modelId(), + serviceSettings.projectId(), + serviceSettings.url(), + serviceSettings.apiVersion(), + serviceSettings.maxInputTokens(), + embeddingSize, + similarityToUse, + serviceSettings.rateLimitSettings() ); + + return new IbmWatsonxEmbeddingsModel(embeddingsModel, updatedServiceSettings); } else { - listener.onResponse(model); + throw ServiceUtils.invalidModelTypeForUpdateModelWithEmbeddingDetails(model.getClass()); } } - private IbmWatsonxEmbeddingsModel updateModelWithEmbeddingDetails(IbmWatsonxEmbeddingsModel model, int embeddingSize) { - var similarityFromModel = model.getServiceSettings().similarity(); - var similarityToUse = similarityFromModel == null ? SimilarityMeasure.DOT_PRODUCT : similarityFromModel; - - IbmWatsonxEmbeddingsServiceSettings serviceSettings = new IbmWatsonxEmbeddingsServiceSettings( - model.getServiceSettings().modelId(), - model.getServiceSettings().projectId(), - model.getServiceSettings().url(), - model.getServiceSettings().apiVersion(), - model.getServiceSettings().maxInputTokens(), - embeddingSize, - similarityToUse, - model.getServiceSettings().rateLimitSettings() - ); - - return new IbmWatsonxEmbeddingsModel(model, serviceSettings); - } - @Override protected void doInfer( Model model, @@ -257,7 +282,6 @@ protected void doChunkedInfer( DocumentsOnlyInput input, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { @@ -266,7 +290,8 @@ protected void doChunkedInfer( var batchedRequests = new EmbeddingRequestChunker( input.getInputs(), EMBEDDING_MAX_BATCH_SIZE, - EmbeddingRequestChunker.EmbeddingType.FLOAT + EmbeddingRequestChunker.EmbeddingType.FLOAT, + model.getConfigurations().getChunkingSettings() ).batchRequestsWithListeners(listener); for (var request : batchedRequests) { var action = ibmWatsonxModel.accept(getActionCreator(getSender(), getServiceComponents()), taskSettings, inputType); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/embeddings/IbmWatsonxEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/embeddings/IbmWatsonxEmbeddingsModel.java index d60e31b5d41c0..6b20e07ecc0a2 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/embeddings/IbmWatsonxEmbeddingsModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/embeddings/IbmWatsonxEmbeddingsModel.java @@ -9,6 +9,7 @@ import org.apache.http.client.utils.URIBuilder; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptyTaskSettings; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.ModelConfigurations; @@ -40,6 +41,7 @@ public IbmWatsonxEmbeddingsModel( String service, Map serviceSettings, Map taskSettings, + ChunkingSettings chunkingSettings, Map secrets, ConfigurationParseContext context ) { @@ -49,6 +51,7 @@ public IbmWatsonxEmbeddingsModel( service, IbmWatsonxEmbeddingsServiceSettings.fromMap(serviceSettings, context), EmptyTaskSettings.INSTANCE, + chunkingSettings, DefaultSecretSettings.fromMap(secrets) ); } @@ -64,10 +67,11 @@ public IbmWatsonxEmbeddingsModel(IbmWatsonxEmbeddingsModel model, IbmWatsonxEmbe String service, 
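// Note: EmbeddingRequestChunker now takes the model's stored chunking settings as a
// fourth constructor argument, which is what replaces the deleted ChunkingOptions.
// Batching pattern from the watsonx doChunkedInfer hunk above:
//
//     var batchedRequests = new EmbeddingRequestChunker(
//         input.getInputs(),
//         EMBEDDING_MAX_BATCH_SIZE,
//         EmbeddingRequestChunker.EmbeddingType.FLOAT,
//         model.getConfigurations().getChunkingSettings()
//     ).batchRequestsWithListeners(listener);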
IbmWatsonxEmbeddingsServiceSettings serviceSettings, TaskSettings taskSettings, + ChunkingSettings chunkingsettings, @Nullable DefaultSecretSettings secrets ) { super( - new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings, taskSettings), + new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings, taskSettings, chunkingsettings), new ModelSecrets(secrets), serviceSettings ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java index 2e810c357f8bd..fe0edb851902b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/mistral/MistralService.java @@ -15,7 +15,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; @@ -95,7 +94,6 @@ protected void doChunkedInfer( DocumentsOnlyInput inputs, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java index 81ab87a461696..20ff1c617d21f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiService.java @@ -15,7 +15,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; @@ -264,7 +263,6 @@ protected void doChunkedInfer( DocumentsOnlyInput inputs, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java index 770e6e3cb9cf4..2416aeb62ff33 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java @@ -291,7 +291,7 @@ private static ShardBulkInferenceActionFilter createFilter(ThreadPool threadPool StaticModel model = (StaticModel) invocationOnMock.getArguments()[0]; List inputs = (List) invocationOnMock.getArguments()[2]; ActionListener> listener = (ActionListener< - List>) invocationOnMock.getArguments()[7]; + List>) 
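// Note: IbmWatsonxEmbeddingsModel threads the new settings into a six-argument
// ModelConfigurations constructor (the parameter is spelled "chunkingsettings" in
// this hunk; "chunkingSettings" would match the camelCase used elsewhere). Wiring:
//
//     super(
//         new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings, taskSettings, chunkingSettings),
//         new ModelSecrets(secrets),
//         serviceSettings
//     );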
invocationOnMock.getArguments()[6]; Runnable runnable = () -> { List results = new ArrayList<>(); for (String input : inputs) { @@ -310,7 +310,7 @@ private static ShardBulkInferenceActionFilter createFilter(ThreadPool threadPool } return null; }; - doAnswer(chunkedInferAnswer).when(inferenceService).chunkedInfer(any(), any(), any(), any(), any(), any(), any(), any()); + doAnswer(chunkedInferAnswer).when(inferenceService).chunkedInfer(any(), any(), any(), any(), any(), any(), any()); Answer modelAnswer = invocationOnMock -> { String inferenceId = (String) invocationOnMock.getArguments()[0]; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiRerankResponseEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiRerankResponseEntityTests.java index 32450e3facfd0..7ff79e2618425 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiRerankResponseEntityTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/googlevertexai/GoogleVertexAiRerankResponseEntityTests.java @@ -39,7 +39,7 @@ public void testFromResponse_CreatesResultsForASingleItem() throws IOException { new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) ); - assertThat(parsedResults.getRankedDocs(), is(List.of(new RankedDocsResults.RankedDoc(0, 0.97F, "content 2")))); + assertThat(parsedResults.getRankedDocs(), is(List.of(new RankedDocsResults.RankedDoc(2, 0.97F, "content 2")))); } public void testFromResponse_CreatesResultsForMultipleItems() throws IOException { @@ -68,7 +68,7 @@ public void testFromResponse_CreatesResultsForMultipleItems() throws IOException assertThat( parsedResults.getRankedDocs(), - is(List.of(new RankedDocsResults.RankedDoc(0, 0.97F, "content 2"), new RankedDocsResults.RankedDoc(1, 0.90F, "content 1"))) + is(List.of(new RankedDocsResults.RankedDoc(2, 0.97F, "content 2"), new RankedDocsResults.RankedDoc(1, 0.90F, "content 1"))) ); } @@ -161,4 +161,37 @@ public void testFromResponse_FailsWhenScoreFieldIsNotPresent() { assertThat(thrownException.getMessage(), is("Failed to find required field [score] in Google Vertex AI rerank response")); } + + public void testFromResponse_FailsWhenIDFieldIsNotInteger() { + String responseJson = """ + { + "records": [ + { + "id": "abcd", + "title": "title 2", + "content": "content 2", + "score": 0.97 + }, + { + "id": "1", + "title": "title 1", + "content": "content 1", + "score": 0.96 + } + ] + } + """; + + var thrownException = expectThrows( + IllegalStateException.class, + () -> GoogleVertexAiRerankResponseEntity.fromResponse( + new HttpResult(mock(HttpResponse.class), responseJson.getBytes(StandardCharsets.UTF_8)) + ) + ); + + assertThat( + thrownException.getMessage(), + is("Expected numeric value for record ID field in Google Vertex AI rerank response but received [abcd]") + ); + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java index f444719c730f5..71ff9fc7d84cf 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java +++ 
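// Note: two expectation changes meet in the tests above. Removing ChunkingOptions
// shifts the stubbed chunkedInfer listener from argument index 7 to 6 and drops one
// any() from the doAnswer stub. Separately, the Vertex AI rerank entity now takes
// each RankedDoc index from the record's "id" field rather than its list position,
// so an id of "2" surfaces as index 2:
//
//     assertThat(parsedResults.getRankedDocs(), is(List.of(new RankedDocsResults.RankedDoc(2, 0.97F, "content 2"))));
//
// and a non-numeric id such as "abcd" now fails fast with an IllegalStateException.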
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java @@ -61,7 +61,6 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; -import org.elasticsearch.xpack.inference.DefaultElserFeatureFlag; import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.inference.model.TestModel; import org.junit.AssumptionViolatedException; @@ -103,9 +102,6 @@ protected Collection getPlugins() { @Override protected void minimalMapping(XContentBuilder b) throws IOException { b.field("type", "semantic_text"); - if (DefaultElserFeatureFlag.isEnabled() == false) { - b.field("inference_id", "test_model"); - } } @Override @@ -113,6 +109,12 @@ protected String minimalIsInvalidRoutingPathErrorMessage(Mapper mapper) { return "cannot have nested fields when index is in [index.mode=time_series]"; } + @Override + protected void metaMapping(XContentBuilder b) throws IOException { + super.metaMapping(b); + b.field(INFERENCE_ID_FIELD, DEFAULT_ELSER_2_INFERENCE_ID); + } + @Override protected Object getSampleValueForDocument() { return null; @@ -170,14 +172,13 @@ protected void assertSearchable(MappedFieldType fieldType) { public void testDefaults() throws Exception { final String fieldName = "field"; final XContentBuilder fieldMapping = fieldMapping(this::minimalMapping); + final XContentBuilder expectedMapping = fieldMapping(this::metaMapping); MapperService mapperService = createMapperService(fieldMapping); DocumentMapper mapper = mapperService.documentMapper(); - assertEquals(Strings.toString(fieldMapping), mapper.mappingSource().toString()); + assertEquals(Strings.toString(expectedMapping), mapper.mappingSource().toString()); assertSemanticTextField(mapperService, fieldName, false); - if (DefaultElserFeatureFlag.isEnabled()) { - assertInferenceEndpoints(mapperService, fieldName, DEFAULT_ELSER_2_INFERENCE_ID, DEFAULT_ELSER_2_INFERENCE_ID); - } + assertInferenceEndpoints(mapperService, fieldName, DEFAULT_ELSER_2_INFERENCE_ID, DEFAULT_ELSER_2_INFERENCE_ID); ParsedDocument doc1 = mapper.parse(source(this::writeField)); List fields = doc1.rootDoc().getFields("field"); @@ -211,15 +212,18 @@ public void testSetInferenceEndpoints() throws IOException { assertSerialization.accept(fieldMapping, mapperService); } { - if (DefaultElserFeatureFlag.isEnabled()) { - final XContentBuilder fieldMapping = fieldMapping( - b -> b.field("type", "semantic_text").field(SEARCH_INFERENCE_ID_FIELD, searchInferenceId) - ); - final MapperService mapperService = createMapperService(fieldMapping); - assertSemanticTextField(mapperService, fieldName, false); - assertInferenceEndpoints(mapperService, fieldName, DEFAULT_ELSER_2_INFERENCE_ID, searchInferenceId); - assertSerialization.accept(fieldMapping, mapperService); - } + final XContentBuilder fieldMapping = fieldMapping( + b -> b.field("type", "semantic_text").field(SEARCH_INFERENCE_ID_FIELD, searchInferenceId) + ); + final XContentBuilder expectedMapping = fieldMapping( + b -> b.field("type", "semantic_text") + .field(INFERENCE_ID_FIELD, DEFAULT_ELSER_2_INFERENCE_ID) + .field(SEARCH_INFERENCE_ID_FIELD, searchInferenceId) + ); + final MapperService mapperService = createMapperService(fieldMapping); + assertSemanticTextField(mapperService, fieldName, false); + assertInferenceEndpoints(mapperService, fieldName, DEFAULT_ELSER_2_INFERENCE_ID, searchInferenceId); + assertSerialization.accept(expectedMapping, 
mapperService); } { final XContentBuilder fieldMapping = fieldMapping( @@ -246,26 +250,18 @@ public void testInvalidInferenceEndpoints() { ); } { - final String expectedMessage = DefaultElserFeatureFlag.isEnabled() - ? "[inference_id] on mapper [field] of type [semantic_text] must not be empty" - : "[inference_id] on mapper [field] of type [semantic_text] must be specified"; Exception e = expectThrows( MapperParsingException.class, () -> createMapperService(fieldMapping(b -> b.field("type", "semantic_text").field(INFERENCE_ID_FIELD, ""))) ); - assertThat(e.getMessage(), containsString(expectedMessage)); + assertThat(e.getMessage(), containsString("[inference_id] on mapper [field] of type [semantic_text] must not be empty")); } { - if (DefaultElserFeatureFlag.isEnabled()) { - Exception e = expectThrows( - MapperParsingException.class, - () -> createMapperService(fieldMapping(b -> b.field("type", "semantic_text").field(SEARCH_INFERENCE_ID_FIELD, ""))) - ); - assertThat( - e.getMessage(), - containsString("[search_inference_id] on mapper [field] of type [semantic_text] must not be empty") - ); - } + Exception e = expectThrows( + MapperParsingException.class, + () -> createMapperService(fieldMapping(b -> b.field("type", "semantic_text").field(SEARCH_INFERENCE_ID_FIELD, ""))) + ); + assertThat(e.getMessage(), containsString("[search_inference_id] on mapper [field] of type [semantic_text] must not be empty")); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java index d8402c28cec87..47a96bf78dda1 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/SenderServiceTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.EmptySettingsConfiguration; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; @@ -126,7 +125,6 @@ protected void doChunkedInfer( DocumentsOnlyInput inputs, Map taskSettings, InputType inputType, - ChunkingOptions chunkingOptions, TimeValue timeout, ActionListener> listener ) { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchServiceTests.java index aac111c22558e..a154ded395822 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchServiceTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.inference.services.alibabacloudsearch; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.bytes.BytesArray; @@ -15,13 +16,13 @@ import org.elasticsearch.common.xcontent.XContentHelper; import 
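// Note: with DefaultElserFeatureFlag gone, a bare { "type": "semantic_text" } mapping
// is always accepted; the mapper fills in inference_id with
// DEFAULT_ELSER_2_INFERENCE_ID and the default round-trips through serialization,
// which is why testDefaults now compares against metaMapping() rather than the raw
// minimal mapping. Illustrative shape, assuming only that the constant names the
// built-in ELSER 2 endpoint:
//
//     { "field": { "type": "semantic_text" } }                                      // as written
//     { "field": { "type": "semantic_text", "inference_id": <default ELSER id> } }  // as serialized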
org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -50,6 +51,7 @@ import org.elasticsearch.xpack.inference.services.alibabacloudsearch.embeddings.AlibabaCloudSearchEmbeddingsServiceSettingsTests; import org.elasticsearch.xpack.inference.services.alibabacloudsearch.embeddings.AlibabaCloudSearchEmbeddingsTaskSettingsTests; import org.elasticsearch.xpack.inference.services.alibabacloudsearch.sparse.AlibabaCloudSearchSparseModel; +import org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModelTests; import org.hamcrest.MatcherAssert; import org.junit.After; import org.junit.Before; @@ -325,6 +327,43 @@ public void doInfer( } } + public void testUpdateModelWithEmbeddingDetails_InvalidModelProvided() throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var service = new AlibabaCloudSearchService(senderFactory, createWithEmptySettings(threadPool))) { + var model = OpenAiChatCompletionModelTests.createChatCompletionModel( + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10) + ); + assertThrows( + ElasticsearchStatusException.class, + () -> { service.updateModelWithEmbeddingDetails(model, randomNonNegativeInt()); } + ); + } + } + + public void testUpdateModelWithEmbeddingDetails_UpdatesEmbeddingSizeAndSimilarity() throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + try (var service = new AlibabaCloudSearchService(senderFactory, createWithEmptySettings(threadPool))) { + var embeddingSize = randomNonNegativeInt(); + var model = AlibabaCloudSearchEmbeddingsModelTests.createModel( + randomAlphaOfLength(10), + randomFrom(TaskType.values()), + AlibabaCloudSearchEmbeddingsServiceSettingsTests.createRandom(), + AlibabaCloudSearchEmbeddingsTaskSettingsTests.createRandom(), + null + ); + + Model updatedModel = service.updateModelWithEmbeddingDetails(model, embeddingSize); + + assertEquals(SimilarityMeasure.DOT_PRODUCT, updatedModel.getServiceSettings().similarity()); + assertEquals(embeddingSize, updatedModel.getServiceSettings().dimensions().intValue()); + } + } + public void testChunkedInfer_TextEmbeddingChunkingSettingsSet() throws IOException { testChunkedInfer(TaskType.TEXT_EMBEDDING, ChunkingSettingsTests.createRandomChunkingSettings()); } @@ -361,7 +400,6 @@ public void testChunkedInfer_InvalidTaskType() throws IOException { List.of("foo", "bar"), new HashMap<>(), InputType.INGEST, - new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, listener ); @@ -380,16 +418,7 @@ private void testChunkedInfer(TaskType taskType, ChunkingSettings chunkingSettin var model = createModelForTaskType(taskType, chunkingSettings); PlainActionFuture> listener = new PlainActionFuture<>(); - service.chunkedInfer( - model, - null, - input, - new HashMap<>(), 
- InputType.INGEST, - new ChunkingOptions(null, null), - InferenceAction.Request.DEFAULT_TIMEOUT, - listener - ); + service.chunkedInfer(model, null, input, new HashMap<>(), InputType.INGEST, InferenceAction.Request.DEFAULT_TIMEOUT, listener); var results = listener.actionGet(TIMEOUT); assertThat(results, instanceOf(List.class)); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java index e76fb10c96131..35b5642b7a60c 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/AmazonBedrockServiceTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; @@ -50,6 +49,7 @@ import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModel; import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsModelTests; import org.elasticsearch.xpack.inference.services.amazonbedrock.embeddings.AmazonBedrockEmbeddingsServiceSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettingsTests; import org.hamcrest.CoreMatchers; import org.hamcrest.MatcherAssert; import org.hamcrest.Matchers; @@ -72,6 +72,7 @@ import static org.elasticsearch.xpack.inference.results.ChatCompletionResultsTests.buildExpectationCompletion; import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectationFloat; import static org.elasticsearch.xpack.inference.services.ServiceComponentsTests.createWithEmptySettings; +import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockProviderCapabilities.getProviderDefaultSimilarityMeasure; import static org.elasticsearch.xpack.inference.services.amazonbedrock.AmazonBedrockSecretSettingsTests.getAmazonBedrockSecretSettingsMap; import static org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionServiceSettingsTests.createChatCompletionRequestSettingsMap; import static org.elasticsearch.xpack.inference.services.amazonbedrock.completion.AmazonBedrockChatCompletionTaskSettingsTests.getChatCompletionTaskSettingsMap; @@ -1375,6 +1376,78 @@ public void testCheckModelConfig_ReturnsNewModelReference_AndDoesNotSendDimensio } } + public void testUpdateModelWithEmbeddingDetails_InvalidModelProvided() throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { + var model = AmazonBedrockChatCompletionModelTests.createModel( + randomAlphaOfLength(10), + 
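// Note: with ChunkingOptions removed, the test call sites collapse to the same
// seven-argument one-liner repeated across the service test suites:
//
//     service.chunkedInfer(model, null, input, new HashMap<>(), InputType.INGEST, InferenceAction.Request.DEFAULT_TIMEOUT, listener);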
randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomFrom(AmazonBedrockProvider.values()), + randomAlphaOfLength(10), + randomAlphaOfLength(10) + ); + assertThrows( + ElasticsearchStatusException.class, + () -> { service.updateModelWithEmbeddingDetails(model, randomNonNegativeInt()); } + ); + } + } + + public void testUpdateModelWithEmbeddingDetails_NullSimilarityInOriginalModel() throws IOException { + testUpdateModelWithEmbeddingDetails_Successful(null); + } + + public void testUpdateModelWithEmbeddingDetails_NonNullSimilarityInOriginalModel() throws IOException { + testUpdateModelWithEmbeddingDetails_Successful(randomFrom(SimilarityMeasure.values())); + } + + private void testUpdateModelWithEmbeddingDetails_Successful(SimilarityMeasure similarityMeasure) throws IOException { + var sender = mock(Sender.class); + var factory = mock(HttpRequestSender.Factory.class); + when(factory.createSender()).thenReturn(sender); + + var amazonBedrockFactory = new AmazonBedrockMockRequestSender.Factory( + ServiceComponentsTests.createWithSettings(threadPool, Settings.EMPTY), + mockClusterServiceEmpty() + ); + + try (var service = new AmazonBedrockService(factory, amazonBedrockFactory, createWithEmptySettings(threadPool))) { + var embeddingSize = randomNonNegativeInt(); + var provider = randomFrom(AmazonBedrockProvider.values()); + var model = AmazonBedrockEmbeddingsModelTests.createModel( + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + provider, + randomNonNegativeInt(), + randomBoolean(), + randomNonNegativeInt(), + similarityMeasure, + RateLimitSettingsTests.createRandom(), + createRandomChunkingSettings(), + randomAlphaOfLength(10), + randomAlphaOfLength(10) + ); + + Model updatedModel = service.updateModelWithEmbeddingDetails(model, embeddingSize); + + SimilarityMeasure expectedSimilarityMeasure = similarityMeasure == null + ? 
getProviderDefaultSimilarityMeasure(provider) + : similarityMeasure; + assertEquals(expectedSimilarityMeasure, updatedModel.getServiceSettings().similarity()); + assertEquals(embeddingSize, updatedModel.getServiceSettings().dimensions().intValue()); + } + } + public void testInfer_UnauthorizedResponse() throws IOException { var sender = mock(Sender.class); var factory = mock(HttpRequestSender.Factory.class); @@ -1485,7 +1558,6 @@ private void testChunkedInfer(AmazonBedrockEmbeddingsModel model) throws IOExcep List.of("abc", "xyz"), new HashMap<>(), InputType.INGEST, - new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, listener ); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java index 76ea7a5bde5ca..8636ba8890e87 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; @@ -1194,7 +1193,6 @@ private void testChunkedInfer(AzureAiStudioEmbeddingsModel model) throws IOExcep List.of("foo", "bar"), new HashMap<>(), InputType.INGEST, - new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, listener ); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java index 40f8b7e0977e4..b0c590e237a44 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; @@ -1194,6 +1193,53 @@ public void testCheckModelConfig_ReturnsNewModelReference_AndDoesNotSendDimensio } } + public void testUpdateModelWithEmbeddingDetails_InvalidModelProvided() throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var service = new AzureOpenAiService(senderFactory, createWithEmptySettings(threadPool))) { + var model = AzureOpenAiCompletionModelTests.createModelWithRandomValues(); + assertThrows( + ElasticsearchStatusException.class, + () -> { service.updateModelWithEmbeddingDetails(model, randomNonNegativeInt()); } + ); + } + } + + public void 
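// Note: the expected fallback similarity in these new tests is service specific:
// Amazon Bedrock asks getProviderDefaultSimilarityMeasure(provider), Cohere (below)
// uses CohereService.defaultSimilarity(), and Azure OpenAI and Google Vertex AI fall
// back to DOT_PRODUCT. Shared assertion pattern, with serviceDefault standing in for
// the per-service fallback:
//
//     SimilarityMeasure expected = similarityMeasure == null ? serviceDefault : similarityMeasure;
//     assertEquals(expected, updatedModel.getServiceSettings().similarity());
//     assertEquals(embeddingSize, updatedModel.getServiceSettings().dimensions().intValue());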
testUpdateModelWithEmbeddingDetails_NullSimilarityInOriginalModel() throws IOException { + testUpdateModelWithEmbeddingDetails_Successful(null); + } + + public void testUpdateModelWithEmbeddingDetails_NonNullSimilarityInOriginalModel() throws IOException { + testUpdateModelWithEmbeddingDetails_Successful(randomFrom(SimilarityMeasure.values())); + } + + private void testUpdateModelWithEmbeddingDetails_Successful(SimilarityMeasure similarityMeasure) throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var service = new AzureOpenAiService(senderFactory, createWithEmptySettings(threadPool))) { + var embeddingSize = randomNonNegativeInt(); + var model = AzureOpenAiEmbeddingsModelTests.createModel( + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomNonNegativeInt(), + randomBoolean(), + randomNonNegativeInt(), + similarityMeasure, + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10) + ); + + Model updatedModel = service.updateModelWithEmbeddingDetails(model, embeddingSize); + + SimilarityMeasure expectedSimilarityMeasure = similarityMeasure == null ? SimilarityMeasure.DOT_PRODUCT : similarityMeasure; + assertEquals(expectedSimilarityMeasure, updatedModel.getServiceSettings().similarity()); + assertEquals(embeddingSize, updatedModel.getServiceSettings().dimensions().intValue()); + } + } + public void testInfer_UnauthorisedResponse() throws IOException, URISyntaxException { var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); @@ -1296,7 +1342,6 @@ private void testChunkedInfer(AzureOpenAiEmbeddingsModel model) throws IOExcepti List.of("foo", "bar"), new HashMap<>(), InputType.INGEST, - new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, listener ); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java index 725879e76efc1..259a32aa6254d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; @@ -1074,6 +1073,50 @@ public void testCheckModelConfig_DoesNotUpdateSimilarity_WhenItIsSpecifiedAsCosi } } + public void testUpdateModelWithEmbeddingDetails_InvalidModelProvided() throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var service = new CohereService(senderFactory, createWithEmptySettings(threadPool))) { + var model = CohereCompletionModelTests.createModel(randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10)); + assertThrows( + ElasticsearchStatusException.class, + () -> { service.updateModelWithEmbeddingDetails(model, randomNonNegativeInt()); } + ); + } + } + + public void 
testUpdateModelWithEmbeddingDetails_NullSimilarityInOriginalModel() throws IOException { + testUpdateModelWithEmbeddingDetails_Successful(null); + } + + public void testUpdateModelWithEmbeddingDetails_NonNullSimilarityInOriginalModel() throws IOException { + testUpdateModelWithEmbeddingDetails_Successful(randomFrom(SimilarityMeasure.values())); + } + + private void testUpdateModelWithEmbeddingDetails_Successful(SimilarityMeasure similarityMeasure) throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var service = new CohereService(senderFactory, createWithEmptySettings(threadPool))) { + var embeddingSize = randomNonNegativeInt(); + var model = CohereEmbeddingsModelTests.createModel( + randomAlphaOfLength(10), + randomAlphaOfLength(10), + CohereEmbeddingsTaskSettings.EMPTY_SETTINGS, + randomNonNegativeInt(), + randomNonNegativeInt(), + randomAlphaOfLength(10), + randomFrom(CohereEmbeddingType.values()), + similarityMeasure + ); + + Model updatedModel = service.updateModelWithEmbeddingDetails(model, embeddingSize); + + SimilarityMeasure expectedSimilarityMeasure = similarityMeasure == null ? CohereService.defaultSimilarity() : similarityMeasure; + assertEquals(expectedSimilarityMeasure, updatedModel.getServiceSettings().similarity()); + assertEquals(embeddingSize, updatedModel.getServiceSettings().dimensions().intValue()); + } + } + public void testInfer_UnauthorisedResponse() throws IOException { var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); @@ -1407,7 +1450,6 @@ private void testChunkedInfer(CohereEmbeddingsModel model) throws IOException { List.of("foo", "bar"), new HashMap<>(), InputType.UNSPECIFIED, - new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, listener ); @@ -1499,7 +1541,6 @@ public void testChunkedInfer_BatchesCalls_Bytes() throws IOException { List.of("foo", "bar"), new HashMap<>(), InputType.UNSPECIFIED, - new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, listener ); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceTests.java index 3767ac496d183..d3101099d06c7 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.EmptySecretSettings; import org.elasticsearch.inference.EmptyTaskSettings; import org.elasticsearch.inference.InferenceServiceConfiguration; @@ -461,7 +460,6 @@ public void testChunkedInfer_PassesThrough() throws IOException { List.of("input text"), new HashMap<>(), InputType.INGEST, - new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, listener ); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java index 9a4d0dda82238..306509ea60cfc 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptyTaskSettings; import org.elasticsearch.inference.InferenceResults; @@ -902,7 +901,6 @@ private void testChunkInfer_e5(ChunkingSettings chunkingSettings) throws Interru List.of("foo", "bar"), Map.of(), InputType.SEARCH, - new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, latchedListener ); @@ -973,7 +971,6 @@ private void testChunkInfer_Sparse(ChunkingSettings chunkingSettings) throws Int List.of("foo", "bar"), Map.of(), InputType.SEARCH, - new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, latchedListener ); @@ -1044,7 +1041,6 @@ private void testChunkInfer_Elser(ChunkingSettings chunkingSettings) throws Inte List.of("foo", "bar"), Map.of(), InputType.SEARCH, - new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, latchedListener ); @@ -1090,7 +1086,6 @@ public void testChunkInferSetsTokenization() { List.of("foo", "bar"), Map.of(), InputType.SEARCH, - null, InferenceAction.Request.DEFAULT_TIMEOUT, ActionListener.wrap(r -> fail("unexpected result"), e -> fail(e.getMessage())) ); @@ -1102,7 +1097,6 @@ public void testChunkInferSetsTokenization() { List.of("foo", "bar"), Map.of(), InputType.SEARCH, - new ChunkingOptions(256, null), InferenceAction.Request.DEFAULT_TIMEOUT, ActionListener.wrap(r -> fail("unexpected result"), e -> fail(e.getMessage())) ); @@ -1155,7 +1149,6 @@ public void testChunkInfer_FailsBatch() throws InterruptedException { List.of("foo", "bar", "baz"), Map.of(), InputType.SEARCH, - new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, latchedListener ); @@ -1228,7 +1221,6 @@ public void testChunkingLargeDocument() throws InterruptedException { List.of(input), Map.of(), InputType.SEARCH, - new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, latchedListener ); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java index bc8020d8d88fe..375c583cce13a 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java @@ -19,7 +19,6 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptyTaskSettings; import 
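// Note: the internal-service tests above lose both ChunkingOptions forms, including
// the explicit per-request override new ChunkingOptions(256, null) in
// testChunkInferSetsTokenization; there is no replacement argument, so chunk sizing
// now comes solely from the model's ChunkingSettings. Reduced call shape:
//
//     service.chunkedInfer(model, null, List.of("foo", "bar"), Map.of(), InputType.SEARCH,
//         InferenceAction.Request.DEFAULT_TIMEOUT, latchedListener);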
org.elasticsearch.inference.InferenceServiceConfiguration; @@ -870,16 +869,7 @@ private void testChunkedInfer(String modelId, String apiKey, GoogleAiStudioEmbed webServer.enqueue(new MockResponse().setResponseCode(200).setBody(responseJson)); PlainActionFuture> listener = new PlainActionFuture<>(); - service.chunkedInfer( - model, - null, - input, - new HashMap<>(), - InputType.INGEST, - new ChunkingOptions(null, null), - InferenceAction.Request.DEFAULT_TIMEOUT, - listener - ); + service.chunkedInfer(model, null, input, new HashMap<>(), InputType.INGEST, InferenceAction.Request.DEFAULT_TIMEOUT, listener); var results = listener.actionGet(TIMEOUT); assertThat(results, hasSize(2)); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java index 906a825e49561..2aeba5fcbe209 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.http.MockWebServer; @@ -30,9 +31,11 @@ import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.services.ServiceFields; import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsModel; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsModelTests; import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsServiceSettings; import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings; import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankModel; +import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankModelTests; import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankTaskSettings; import org.hamcrest.CoreMatchers; import org.hamcrest.Matchers; @@ -827,6 +830,37 @@ public void testParsePersistedConfig_CreatesAnEmbeddingsModelWhenChunkingSetting } } + public void testUpdateModelWithEmbeddingDetails_InvalidModelProvided() throws IOException { + try (var service = createGoogleVertexAiService()) { + var model = GoogleVertexAiRerankModelTests.createModel(randomAlphaOfLength(10), randomNonNegativeInt()); + assertThrows( + ElasticsearchStatusException.class, + () -> { service.updateModelWithEmbeddingDetails(model, randomNonNegativeInt()); } + ); + } + } + + public void testUpdateModelWithEmbeddingDetails_NullSimilarityInOriginalModel() throws IOException { + testUpdateModelWithEmbeddingDetails_Successful(null); + } + + public void testUpdateModelWithEmbeddingDetails_NonNullSimilarityInOriginalModel() throws IOException { + testUpdateModelWithEmbeddingDetails_Successful(randomFrom(SimilarityMeasure.values())); + } + + private void 
testUpdateModelWithEmbeddingDetails_Successful(SimilarityMeasure similarityMeasure) throws IOException { + try (var service = createGoogleVertexAiService()) { + var embeddingSize = randomNonNegativeInt(); + var model = GoogleVertexAiEmbeddingsModelTests.createModel(randomAlphaOfLength(10), randomBoolean(), similarityMeasure); + + Model updatedModel = service.updateModelWithEmbeddingDetails(model, embeddingSize); + + SimilarityMeasure expectedSimilarityMeasure = similarityMeasure == null ? SimilarityMeasure.DOT_PRODUCT : similarityMeasure; + assertEquals(expectedSimilarityMeasure, updatedModel.getServiceSettings().similarity()); + assertEquals(embeddingSize, updatedModel.getServiceSettings().dimensions().intValue()); + } + } + // testInfer tested via end-to-end notebook tests in AppEx repo @SuppressWarnings("checkstyle:LineLength") diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java index 7836c5c15cfb1..5b016de7493f5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java @@ -64,7 +64,7 @@ public void testOverrideWith_DoesNotOverrideAndModelRemainsEqual_WhenSettingsAre } public void testOverrideWith_SetsInputTypeToOverride_WhenFieldIsNullInModelTaskSettings_AndNullInRequestTaskSettings() { - var model = createModel("model", Boolean.FALSE, null); + var model = createModel("model", Boolean.FALSE, (InputType) null); var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, null), InputType.SEARCH); var expectedModel = createModel("model", Boolean.FALSE, InputType.SEARCH); @@ -80,7 +80,7 @@ public void testOverrideWith_SetsInputType_FromRequest_IfValid_OverridingStoredT } public void testOverrideWith_SetsInputType_FromRequest_IfValid_OverridingRequestTaskSettings() { - var model = createModel("model", Boolean.FALSE, null); + var model = createModel("model", Boolean.FALSE, (InputType) null); var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, InputType.CLUSTERING), InputType.SEARCH); var expectedModel = createModel("model", Boolean.FALSE, InputType.SEARCH); @@ -96,10 +96,10 @@ public void testOverrideWith_OverridesInputType_WithRequestTaskSettingsSearch_Wh } public void testOverrideWith_DoesNotSetInputType_FromRequest_IfInputTypeIsInvalid() { - var model = createModel("model", Boolean.FALSE, null); + var model = createModel("model", Boolean.FALSE, (InputType) null); var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, null), InputType.UNSPECIFIED); - var expectedModel = createModel("model", Boolean.FALSE, null); + var expectedModel = createModel("model", Boolean.FALSE, (InputType) null); MatcherAssert.assertThat(overriddenModel, is(expectedModel)); } @@ -136,6 +136,31 @@ public static GoogleVertexAiEmbeddingsModel createModel( ); } + public static GoogleVertexAiEmbeddingsModel createModel( + String modelId, + @Nullable Boolean autoTruncate, + SimilarityMeasure similarityMeasure + ) { + return new GoogleVertexAiEmbeddingsModel( + "id", + TaskType.TEXT_EMBEDDING, + "service", + new 
GoogleVertexAiEmbeddingsServiceSettings( + randomAlphaOfLength(8), + randomAlphaOfLength(8), + modelId, + false, + null, + null, + similarityMeasure, + null + ), + new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, randomFrom(InputType.INGEST, InputType.SEARCH)), + null, + new GoogleVertexAiSecretSettings(new SecureString(randomAlphaOfLength(8).toCharArray())) + ); + } + public static GoogleVertexAiEmbeddingsModel createModel(String modelId, @Nullable Boolean autoTruncate, @Nullable InputType inputType) { return new GoogleVertexAiEmbeddingsModel( "id", diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceElserServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceElserServiceTests.java index df82f1ed393bf..8f0e481213cdf 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceElserServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceElserServiceTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InputType; import org.elasticsearch.test.ESTestCase; @@ -98,7 +97,6 @@ public void testChunkedInfer_CallsInfer_Elser_ConvertsFloatResponse() throws IOE List.of("abc"), new HashMap<>(), InputType.INGEST, - new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, listener ); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java index 0ff4bd805ea36..022cbecd1ea6a 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; @@ -782,7 +781,6 @@ public void testChunkedInfer_CallsInfer_TextEmbedding_ConvertsFloatResponse() th List.of("abc"), new HashMap<>(), InputType.INGEST, - new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, listener ); @@ -838,7 +836,6 @@ public void testChunkedInfer() throws IOException { List.of("abc"), new HashMap<>(), InputType.INGEST, - new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, listener ); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxServiceTests.java index d6c491f2b7cec..5aa826f1d80fe 100644 --- 
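// Note: the (InputType) null casts above are forced by the new overload: createModel
// now exists with a third parameter of either InputType or SimilarityMeasure, so a
// bare null argument would be ambiguous to the compiler:
//
//     var byInputType  = createModel("model", Boolean.FALSE, (InputType) null);
//     var bySimilarity = createModel("model", Boolean.FALSE, (SimilarityMeasure) null);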
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxServiceTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; +import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.EmptyTaskSettings; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; @@ -50,6 +50,7 @@ import org.elasticsearch.xpack.inference.services.ServiceFields; import org.elasticsearch.xpack.inference.services.ibmwatsonx.embeddings.IbmWatsonxEmbeddingsModel; import org.elasticsearch.xpack.inference.services.ibmwatsonx.embeddings.IbmWatsonxEmbeddingsModelTests; +import org.elasticsearch.xpack.inference.services.openai.completion.OpenAiChatCompletionModelTests; import org.hamcrest.MatcherAssert; import org.hamcrest.Matchers; import org.junit.After; @@ -69,6 +70,8 @@ import static org.elasticsearch.xpack.inference.Utils.getPersistedConfigMap; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; +import static org.elasticsearch.xpack.inference.chunking.ChunkingSettingsTests.createRandomChunkingSettings; +import static org.elasticsearch.xpack.inference.chunking.ChunkingSettingsTests.createRandomChunkingSettingsMap; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; import static org.elasticsearch.xpack.inference.results.TextEmbeddingResultsTests.buildExpectationFloat; @@ -124,6 +127,7 @@ public void testParseRequestConfig_CreatesAIbmWatsonxEmbeddingsModel() throws IO assertThat(embeddingsModel.getServiceSettings().url(), is(URI.create(url))); assertThat(embeddingsModel.getServiceSettings().apiVersion(), is(apiVersion)); assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is(apiKey)); + assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class)); }, e -> fail("Model parsing should have succeeded, but failed: " + e.getMessage())); service.parseRequestConfig( @@ -150,6 +154,45 @@ public void testParseRequestConfig_CreatesAIbmWatsonxEmbeddingsModel() throws IO } } + public void testParseRequestConfig_CreatesAIbmWatsonxEmbeddingsModelWhenChunkingSettingsProvided() throws IOException { + try (var service = createIbmWatsonxService()) { + ActionListener modelListener = ActionListener.wrap(model -> { + assertThat(model, instanceOf(IbmWatsonxEmbeddingsModel.class)); + + var embeddingsModel = (IbmWatsonxEmbeddingsModel) model; + assertThat(embeddingsModel.getServiceSettings().modelId(), is(modelId)); + assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); + assertThat(embeddingsModel.getServiceSettings().url(), is(URI.create(url))); + assertThat(embeddingsModel.getServiceSettings().apiVersion(), is(apiVersion)); + assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is(apiKey)); + assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class)); + }, e -> fail("Model parsing should have succeeded, but failed: " + e.getMessage())); + + 
service.parseRequestConfig( + "id", + TaskType.TEXT_EMBEDDING, + getRequestConfigMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + modelId, + IbmWatsonxServiceFields.PROJECT_ID, + projectId, + ServiceFields.URL, + url, + IbmWatsonxServiceFields.API_VERSION, + apiVersion + ) + ), + new HashMap<>(Map.of()), + createRandomChunkingSettingsMap(), + getSecretSettingsMap(apiKey) + ), + modelListener + ); + } + } + public void testParseRequestConfig_ThrowsUnsupportedModelType() throws IOException { try (var service = createIbmWatsonxService()) { var failureListener = getModelListenerForException( @@ -235,6 +278,47 @@ public void testParsePersistedConfigWithSecrets_CreatesAIbmWatsonxEmbeddingsMode assertThat(embeddingsModel.getServiceSettings().apiVersion(), is(apiVersion)); assertThat(embeddingsModel.getTaskSettings(), is(EmptyTaskSettings.INSTANCE)); assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is(apiKey)); + assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class)); + } + } + + public void testParsePersistedConfigWithSecrets_CreatesAIbmWatsonxEmbeddingsModelWhenChunkingSettingsProvided() throws IOException { + try (var service = createIbmWatsonxService()) { + var persistedConfig = getPersistedConfigMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + modelId, + IbmWatsonxServiceFields.PROJECT_ID, + projectId, + ServiceFields.URL, + url, + IbmWatsonxServiceFields.API_VERSION, + apiVersion + ) + ), + getTaskSettingsMapEmpty(), + createRandomChunkingSettingsMap(), + getSecretSettingsMap(apiKey) + ); + + var model = service.parsePersistedConfigWithSecrets( + "id", + TaskType.TEXT_EMBEDDING, + persistedConfig.config(), + persistedConfig.secrets() + ); + + assertThat(model, instanceOf(IbmWatsonxEmbeddingsModel.class)); + + var embeddingsModel = (IbmWatsonxEmbeddingsModel) model; + assertThat(embeddingsModel.getServiceSettings().modelId(), is(modelId)); + assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); + assertThat(embeddingsModel.getServiceSettings().url(), is(URI.create(url))); + assertThat(embeddingsModel.getServiceSettings().apiVersion(), is(apiVersion)); + assertThat(embeddingsModel.getTaskSettings(), is(EmptyTaskSettings.INSTANCE)); + assertThat(embeddingsModel.getSecretSettings().apiKey().toString(), is(apiKey)); + assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class)); } } @@ -399,6 +483,73 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists } } + public void testParsePersistedConfig_CreatesAIbmWatsonxEmbeddingsModelWhenChunkingSettingsNotProvided() throws IOException { + try (var service = createIbmWatsonxService()) { + var persistedConfig = getPersistedConfigMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + modelId, + IbmWatsonxServiceFields.PROJECT_ID, + projectId, + ServiceFields.URL, + url, + IbmWatsonxServiceFields.API_VERSION, + apiVersion + ) + ), + getTaskSettingsMapEmpty(), + null + ); + + var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config()); + + assertThat(model, instanceOf(IbmWatsonxEmbeddingsModel.class)); + + var embeddingsModel = (IbmWatsonxEmbeddingsModel) model; + assertThat(embeddingsModel.getServiceSettings().modelId(), is(modelId)); + assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); + assertThat(embeddingsModel.getServiceSettings().url(), is(URI.create(url))); + 
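// Note: these watsonx tests pass an extra chunking-settings map into the config
// helpers; the four-slot shape below is a sketch, assuming the overloads of
// getRequestConfigMap/getPersistedConfigMap that accept the map built by
// createRandomChunkingSettingsMap() in ChunkingSettingsTests:
//
//     getRequestConfigMap(
//         serviceSettingsMap,                 // model_id, project_id, url, api_version
//         new HashMap<>(Map.of()),            // empty task settings
//         createRandomChunkingSettingsMap(),  // e.g. { "strategy": "word", "max_chunk_size": ... }
//         getSecretSettingsMap(apiKey)
//     );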
assertThat(embeddingsModel.getServiceSettings().apiVersion(), is(apiVersion)); + assertThat(embeddingsModel.getTaskSettings(), is(EmptyTaskSettings.INSTANCE)); + assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class)); + } + } + + public void testParsePersistedConfig_CreatesAIbmWatsonxEmbeddingsModelWhenChunkingSettingsProvided() throws IOException { + try (var service = createIbmWatsonxService()) { + var persistedConfig = getPersistedConfigMap( + new HashMap<>( + Map.of( + ServiceFields.MODEL_ID, + modelId, + IbmWatsonxServiceFields.PROJECT_ID, + projectId, + ServiceFields.URL, + url, + IbmWatsonxServiceFields.API_VERSION, + apiVersion + ) + ), + getTaskSettingsMapEmpty(), + createRandomChunkingSettingsMap(), + null + ); + + var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config()); + + assertThat(model, instanceOf(IbmWatsonxEmbeddingsModel.class)); + + var embeddingsModel = (IbmWatsonxEmbeddingsModel) model; + assertThat(embeddingsModel.getServiceSettings().modelId(), is(modelId)); + assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); + assertThat(embeddingsModel.getServiceSettings().url(), is(URI.create(url))); + assertThat(embeddingsModel.getServiceSettings().apiVersion(), is(apiVersion)); + assertThat(embeddingsModel.getTaskSettings(), is(EmptyTaskSettings.INSTANCE)); + assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class)); + } + } + public void testInfer_ThrowsErrorWhenModelIsNotIbmWatsonxModel() throws IOException { var sender = mock(Sender.class); @@ -488,7 +639,15 @@ public void testInfer_SendsEmbeddingsRequest() throws IOException { } } - public void testChunkedInfer_Batches() throws IOException { + public void testChunkedInfer_ChunkingSettingsNotSet() throws IOException { + testChunkedInfer_Batches(null); + } + + public void testChunkedInfer_ChunkingSettingsSet() throws IOException { + testChunkedInfer_Batches(createRandomChunkingSettings()); + } + + private void testChunkedInfer_Batches(ChunkingSettings chunkingSettings) throws IOException { var input = List.of("foo", "bar"); var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); @@ -526,16 +685,7 @@ public void testChunkedInfer_Batches() throws IOException { getUrl(webServer) ); PlainActionFuture<List<ChunkedInferenceServiceResults>> listener = new PlainActionFuture<>(); - service.chunkedInfer( - model, - null, - input, - new HashMap<>(), - InputType.INGEST, - new ChunkingOptions(null, null), - InferenceAction.Request.DEFAULT_TIMEOUT, - listener - ); + service.chunkedInfer(model, null, input, new HashMap<>(), InputType.INGEST, InferenceAction.Request.DEFAULT_TIMEOUT, listener); var results = listener.actionGet(TIMEOUT); assertThat(results, hasSize(2)); @@ -771,6 +921,56 @@ public void testCheckModelConfig_DoesNotUpdateSimilarity_WhenItIsSpecifiedAsCosi } } + public void testUpdateModelWithEmbeddingDetails_InvalidModelProvided() throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var service = new IbmWatsonxServiceWithoutAuth(senderFactory, createWithEmptySettings(threadPool))) { + var model = OpenAiChatCompletionModelTests.createChatCompletionModel( + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10) + ); + assertThrows( + ElasticsearchStatusException.class, + () -> { service.updateModelWithEmbeddingDetails(model, randomNonNegativeInt()); } + ); + } + } + + public void testUpdateModelWithEmbeddingDetails_NullSimilarityInOriginalModel() throws IOException { + testUpdateModelWithEmbeddingDetails_Successful(null); + } + + public void testUpdateModelWithEmbeddingDetails_NonNullSimilarityInOriginalModel() throws IOException { + testUpdateModelWithEmbeddingDetails_Successful(randomFrom(SimilarityMeasure.values())); + } + + private void testUpdateModelWithEmbeddingDetails_Successful(SimilarityMeasure similarityMeasure) throws IOException { + var senderFactory = HttpRequestSenderTests.createSenderFactory(threadPool, clientManager); + + try (var service = new IbmWatsonxServiceWithoutAuth(senderFactory, createWithEmptySettings(threadPool))) { + var embeddingSize = randomNonNegativeInt(); + var model = IbmWatsonxEmbeddingsModelTests.createModel( + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + URI.create(randomAlphaOfLength(10)), + randomAlphaOfLength(10), + randomAlphaOfLength(10), + randomNonNegativeInt(), + similarityMeasure + ); + + Model updatedModel = service.updateModelWithEmbeddingDetails(model, embeddingSize); + + SimilarityMeasure expectedSimilarityMeasure = similarityMeasure == null ? SimilarityMeasure.DOT_PRODUCT : similarityMeasure; + assertEquals(expectedSimilarityMeasure, updatedModel.getServiceSettings().similarity()); + assertEquals(embeddingSize, updatedModel.getServiceSettings().dimensions().intValue()); + } + } + public void testGetConfiguration() throws Exception { try (var service = createIbmWatsonxService()) { String content = XContentHelper.stripWhitespace(""" @@ -878,6 +1078,18 @@ private static ActionListener getModelListenerForException(Class excep }); } + private Map<String, Object> getRequestConfigMap( + Map<String, Object> serviceSettings, + Map<String, Object> taskSettings, + Map<String, Object> chunkingSettings, + Map<String, Object> secretSettings + ) { + var requestConfigMap = getRequestConfigMap(serviceSettings, taskSettings, secretSettings); + requestConfigMap.put(ModelConfigurations.CHUNKING_SETTINGS, chunkingSettings); + + return requestConfigMap; + } + private Map<String, Object> getRequestConfigMap( + Map<String, Object> serviceSettings, + Map<String, Object> taskSettings, diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/embeddings/IbmWatsonxEmbeddingsModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/embeddings/IbmWatsonxEmbeddingsModelTests.java index 93fd7e402a0de..33fcd752fbf30 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/embeddings/IbmWatsonxEmbeddingsModelTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/embeddings/IbmWatsonxEmbeddingsModelTests.java @@ -82,6 +82,7 @@ public static IbmWatsonxEmbeddingsModel createModel( null ), EmptyTaskSettings.INSTANCE, + null, new DefaultSecretSettings(new SecureString(apiKey.toCharArray())) ); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java index 71e9eac9a6635..73bf03fd43ec5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java @@ -19,7 +19,6 @@ import org.elasticsearch.core.Nullable; import 
org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; @@ -673,7 +672,6 @@ public void testChunkedInfer(MistralEmbeddingsModel model) throws IOException { List.of("abc", "def"), new HashMap<>(), InputType.INGEST, - new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, listener ); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java index 509a1f8a3d010..76b5d6fee2c59 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkedInferenceServiceResults; -import org.elasticsearch.inference.ChunkingOptions; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.InferenceServiceConfiguration; import org.elasticsearch.inference.InferenceServiceResults; @@ -1558,7 +1557,6 @@ private void testChunkedInfer(OpenAiEmbeddingsModel model) throws IOException { List.of("foo", "bar"), new HashMap<>(), InputType.INGEST, - new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, listener ); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/validation/SimpleServiceIntegrationValidatorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/validation/SimpleServiceIntegrationValidatorTests.java index 767dd4d64a7d3..22ef35c3a46d3 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/validation/SimpleServiceIntegrationValidatorTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/validation/SimpleServiceIntegrationValidatorTests.java @@ -71,8 +71,10 @@ public void testValidate_ServiceThrowsException() { any() ); - assertThrows(ElasticsearchStatusException.class, () -> { - underTest.validate(mockInferenceService, mockModel, mockActionListener);}); + assertThrows( + ElasticsearchStatusException.class, + () -> { underTest.validate(mockInferenceService, mockModel, mockActionListener); } + ); verifyCallToService(false); } diff --git a/x-pack/plugin/inference/src/yamlRestTest/java/org/elasticsearch/xpack/inference/InferenceRestIT.java b/x-pack/plugin/inference/src/yamlRestTest/java/org/elasticsearch/xpack/inference/InferenceRestIT.java index fe406722ae1e2..8d8ad94d608d7 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/java/org/elasticsearch/xpack/inference/InferenceRestIT.java +++ b/x-pack/plugin/inference/src/yamlRestTest/java/org/elasticsearch/xpack/inference/InferenceRestIT.java @@ -36,7 +36,7 @@ protected Settings restClientSettings() { var baseSettings = super.restClientSettings(); return Settings.builder() .put(baseSettings) - .put(CLIENT_SOCKET_TIMEOUT, "120s") // Long timeout for model download + .put(CLIENT_SOCKET_TIMEOUT, "300s") // Long timeout for model download .build(); } diff 
--git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/10_semantic_text_field_mapping.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/10_semantic_text_field_mapping.yml index 71fb1fd95989f..882f1df03e926 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/10_semantic_text_field_mapping.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/10_semantic_text_field_mapping.yml @@ -371,3 +371,29 @@ setup: - match: { error.type: illegal_argument_exception } - match: { error.reason: "semantic_text field [level_1.level_2.sparse_field] cannot be in an object field with subobjects disabled" } + +--- +"Mapping always includes inference ID": + - requires: + cluster_features: "semantic_text.always_emit_inference_id_fix" + reason: always emit inference ID fix added in 8.17.0 + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_inference + capabilities: [default_elser_2] + + - do: + indices.create: + index: test-always-include-inference-id-index + body: + mappings: + properties: + semantic_field: + type: semantic_text + + - do: + indices.get_mapping: + index: test-always-include-inference-id-index + + - exists: test-always-include-inference-id-index.mappings.properties.semantic_field.inference_id diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/30_semantic_text_inference.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/30_semantic_text_inference.yml index 445df1dc302b9..534e4831c4a0a 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/30_semantic_text_inference.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/30_semantic_text_inference.yml @@ -551,7 +551,7 @@ setup: --- "Calculates embeddings using the default ELSER 2 endpoint": - requires: - reason: "default ELSER 2 inference ID is behind a feature flag" + reason: "default ELSER 2 inference ID is enabled via a capability" test_runner_features: [capabilities] capabilities: - method: GET @@ -624,3 +624,55 @@ setup: - match: { _source.level_1.dense_field.text: "another inference test" } - exists: _source.level_1.dense_field.inference.chunks.0.embeddings - match: { _source.level_1.dense_field.inference.chunks.0.text: "another inference test" } + +--- +"Deletes on bulk operation": + - requires: + cluster_features: semantic_text.delete_fix + reason: Delete operations are properly applied when subsequent operations include a semantic text field. 
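+ # Two documents are indexed in one bulk request, then one is deleted and the other updated in a + # second bulk request; the follow-up semantic query should match only the updated document.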
+ + - do: + bulk: + index: test-index + refresh: true + body: | + {"index":{"_id": "1"}} + {"dense_field": ["you know, for testing", "now with chunks"]} + {"index":{"_id": "2"}} + {"dense_field": ["some more tests", "that include chunks"]} + + - do: + search: + index: test-index + body: + query: + semantic: + field: dense_field + query: "you know, for testing" + + - match: { hits.total.value: 2 } + - match: { hits.total.relation: eq } + - match: { hits.hits.0._source.dense_field.text: ["you know, for testing", "now with chunks"] } + - match: { hits.hits.1._source.dense_field.text: ["some more tests", "that include chunks"] } + + - do: + bulk: + index: test-index + refresh: true + body: | + {"delete":{ "_id": "2"}} + {"update":{"_id": "1"}} + {"doc":{"dense_field": "updated text"}} + + - do: + search: + index: test-index + body: + query: + semantic: + field: dense_field + query: "you know, for testing" + + - match: { hits.total.value: 1 } + - match: { hits.total.relation: eq } + - match: { hits.hits.0._source.dense_field.text: "updated text" } diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml index 17938f3b61a41..c2704a4c22914 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/40_semantic_text_query.yml @@ -843,7 +843,7 @@ setup: --- "Query a field that uses the default ELSER 2 endpoint": - requires: - reason: "default ELSER 2 inference ID is behind a feature flag" + reason: "default ELSER 2 inference ID is enabled via a capability" test_runner_features: [capabilities] capabilities: - method: GET @@ -878,3 +878,117 @@ setup: - match: { hits.total.value: 1 } - match: { hits.hits.0._id: "doc_1" } + +--- +"Query using a sparse embedding model with size set to zero": + - requires: + cluster_features: "semantic_text.zero_size_fix" + reason: zero size fix added in 8.16.1 & 8.15.5 + + - do: + indices.create: + index: test-sparse-index-with-agg-id + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: sparse-inference-id + non_inference_field: + type: text + agg_id: + type: keyword + + - do: + index: + index: test-sparse-index-with-agg-id + id: doc_1 + body: + inference_field: "inference test" + agg_id: "doc_1" + + - do: + index: + index: test-sparse-index-with-agg-id + id: doc_2 + body: + non_inference_field: "non-inference test" + agg_id: "doc_2" + refresh: true + + - do: + search: + index: test-sparse-index-with-agg-id + body: + size: 0 + query: + semantic: + field: "inference_field" + query: "inference test" + aggs: + agg_ids: + terms: + field: agg_id + + - match: { hits.total.value: 1 } + - length: { hits.hits: 0 } + - length: { aggregations.agg_ids.buckets: 1 } + - match: { aggregations.agg_ids.buckets.0.key: "doc_1" } + - match: { aggregations.agg_ids.buckets.0.doc_count: 1 } + +--- +"Query using a dense embedding model with size set to zero": + - requires: + cluster_features: "semantic_text.zero_size_fix" + reason: zero size fix added in 8.16.1 & 8.15.5 + + - do: + indices.create: + index: test-dense-index-with-agg-id + body: + mappings: + properties: + inference_field: + type: semantic_text + inference_id: dense-inference-id + non_inference_field: + type: text + agg_id: + type: keyword + + - do: + index: + index: 
test-dense-index-with-agg-id + id: doc_1 + body: + inference_field: "inference test" + agg_id: "doc_1" + + - do: + index: + index: test-dense-index-with-agg-id + id: doc_2 + body: + non_inference_field: "non-inference test" + agg_id: "doc_2" + refresh: true + + - do: + search: + index: test-dense-index-with-agg-id + body: + size: 0 + query: + semantic: + field: "inference_field" + query: "inference test" + aggs: + agg_ids: + terms: + field: agg_id + + - match: { hits.total.value: 1 } + - length: { hits.hits: 0 } + - length: { aggregations.agg_ids.buckets: 1 } + - match: { aggregations.agg_ids.buckets.0.key: "doc_1" } + - match: { aggregations.agg_ids.buckets.0.doc_count: 1 } diff --git a/x-pack/plugin/kql/build.gradle b/x-pack/plugin/kql/build.gradle index 7e4df5654f225..76a4bd5aff777 100644 --- a/x-pack/plugin/kql/build.gradle +++ b/x-pack/plugin/kql/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ import static org.elasticsearch.gradle.util.PlatformUtils.normalize @@ -26,14 +31,8 @@ dependencies { testImplementation(testArtifact(project(xpackModule('core')))) } -tasks.named('yamlRestTest') { +tasks.named('yamlRestTest').configure { usesDefaultDistribution() -}.configure { - /**************************************************************** - * Enable QA/rest integration tests for snapshot builds only * - * TODO: Enable for all builds upon this feature release * - ****************************************************************/ - enabled = BuildParams.isSnapshotBuild() } /********************************** diff --git a/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 b/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 index da015b699cb15..739fa5eb0c6eb 100644 --- a/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 +++ b/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 @@ -46,9 +46,26 @@ notQuery: ; nestedQuery - : fieldName COLON LEFT_CURLY_BRACKET query RIGHT_CURLY_BRACKET + : fieldName COLON LEFT_CURLY_BRACKET nestedSubQuery RIGHT_CURLY_BRACKET ; +nestedSubQuery + : nestedSubQuery operator=(AND|OR) nestedSubQuery #booleanNestedQuery + | nestedSimpleSubQuery #defaultNestedQuery + ; + +nestedSimpleSubQuery + : notQuery + | nestedQuery + | matchAllQuery + | nestedParenthesizedQuery + | existsQuery + | rangeQuery + | fieldQuery; + +nestedParenthesizedQuery + : LEFT_PARENTHESIS nestedSubQuery RIGHT_PARENTHESIS; + matchAllQuery : (WILDCARD COLON)? 
WILDCARD ; diff --git a/x-pack/plugin/kql/src/main/java/module-info.java b/x-pack/plugin/kql/src/main/java/module-info.java index 41e51033b9c70..e3bb6fb99bbd3 100644 --- a/x-pack/plugin/kql/src/main/java/module-info.java +++ b/x-pack/plugin/kql/src/main/java/module-info.java @@ -13,6 +13,7 @@ requires org.apache.lucene.queryparser; requires org.elasticsearch.logging; requires org.apache.lucene.core; + requires org.apache.lucene.join; exports org.elasticsearch.xpack.kql; exports org.elasticsearch.xpack.kql.parser; diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlAstBuilder.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlAstBuilder.java index 5fe3a61c0a761..2d810a33190ca 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlAstBuilder.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlAstBuilder.java @@ -9,6 +9,7 @@ import org.antlr.v4.runtime.ParserRuleContext; import org.antlr.v4.runtime.Token; +import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -20,6 +21,7 @@ import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; +import java.util.List; import java.util.Set; import java.util.function.BiConsumer; import java.util.function.BiFunction; @@ -56,15 +58,15 @@ public QueryBuilder toQueryBuilder(ParserRuleContext ctx) { @Override public QueryBuilder visitBooleanQuery(KqlBaseParser.BooleanQueryContext ctx) { assert ctx.operator != null; - return isAndQuery(ctx) ? visitAndBooleanQuery(ctx) : visitOrBooleanQuery(ctx); + return isAndQuery(ctx) ? visitAndBooleanQuery(ctx.query()) : visitOrBooleanQuery(ctx.query()); } - public QueryBuilder visitAndBooleanQuery(KqlBaseParser.BooleanQueryContext ctx) { + public QueryBuilder visitAndBooleanQuery(List<? extends ParserRuleContext> clauses) { BoolQueryBuilder builder = QueryBuilders.boolQuery(); // TODO: KQLContext has an option to wrap the clauses into a filter instead of a must clause. Do we need it? 
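// Note: adjacent AND clauses are merged into this builder's single must list, so "a AND b AND c" becomes one flat bool query rather than a chain of nested bool queries.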
- for (ParserRuleContext subQueryCtx : ctx.query()) { - if (subQueryCtx instanceof KqlBaseParser.BooleanQueryContext booleanSubQueryCtx && isAndQuery(booleanSubQueryCtx)) { + for (ParserRuleContext subQueryCtx : clauses) { + if (isAndQuery(subQueryCtx)) { typedParsing(this, subQueryCtx, BoolQueryBuilder.class).must().forEach(builder::must); } else { builder.must(typedParsing(this, subQueryCtx, QueryBuilder.class)); @@ -74,11 +76,11 @@ public QueryBuilder visitAndBooleanQuery(KqlBaseParser.BooleanQueryContext ctx) return rewriteConjunctionQuery(builder); } - public QueryBuilder visitOrBooleanQuery(KqlBaseParser.BooleanQueryContext ctx) { + public QueryBuilder visitOrBooleanQuery(List<? extends ParserRuleContext> clauses) { BoolQueryBuilder builder = QueryBuilders.boolQuery().minimumShouldMatch(1); - for (ParserRuleContext subQueryCtx : ctx.query()) { - if (subQueryCtx instanceof KqlBaseParser.BooleanQueryContext booleanSubQueryCtx && isOrQuery(booleanSubQueryCtx)) { + for (ParserRuleContext subQueryCtx : clauses) { + if (isOrQuery(subQueryCtx)) { typedParsing(this, subQueryCtx, BoolQueryBuilder.class).should().forEach(builder::should); } else { builder.should(typedParsing(this, subQueryCtx, QueryBuilder.class)); @@ -100,8 +102,40 @@ public QueryBuilder visitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryCont @Override public QueryBuilder visitNestedQuery(KqlBaseParser.NestedQueryContext ctx) { - // TODO: implementation - return new MatchNoneQueryBuilder(); + String nestedFieldName = extractText(ctx.fieldName()); + + if (kqlParsingContext.isNestedField(nestedFieldName) == false) { + throw new KqlParsingException( + "[{}] is not a valid nested field name.", + ctx.start.getLine(), + ctx.start.getCharPositionInLine(), + nestedFieldName + ); + } + QueryBuilder subQuery = kqlParsingContext.withNestedPath( + nestedFieldName, + () -> typedParsing(this, ctx.nestedSubQuery(), QueryBuilder.class) + ); + + if (subQuery instanceof MatchNoneQueryBuilder) { + return subQuery; + } + + return wrapWithNestedQuery( + nestedFieldName, + QueryBuilders.nestedQuery(kqlParsingContext.fullFieldName(nestedFieldName), subQuery, ScoreMode.None) + ); + } + + @Override + public QueryBuilder visitBooleanNestedQuery(KqlBaseParser.BooleanNestedQueryContext ctx) { + assert ctx.operator != null; + return isAndQuery(ctx) ? visitAndBooleanQuery(ctx.nestedSubQuery()) : visitOrBooleanQuery(ctx.nestedSubQuery()); + } + + @Override + public QueryBuilder visitNestedParenthesizedQuery(KqlBaseParser.NestedParenthesizedQueryContext ctx) { + return typedParsing(this, ctx.nestedSubQuery(), QueryBuilder.class); } @Override @@ -116,7 +150,7 @@ public QueryBuilder visitExistsQuery(KqlBaseParser.ExistsQueryContext ctx) { BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery().minimumShouldMatch(1); withFields(ctx.fieldName(), (fieldName, mappedFieldType) -> { if (isRuntimeField(mappedFieldType) == false) { - boolQueryBuilder.should(QueryBuilders.existsQuery(fieldName)); + boolQueryBuilder.should(wrapWithNestedQuery(fieldName, QueryBuilders.existsQuery(fieldName))); } }); @@ -137,7 +171,7 @@ public QueryBuilder visitRangeQuery(KqlBaseParser.RangeQueryContext ctx) { rangeQuery.timeZone(kqlParsingContext.timeZone().getId()); } - boolQueryBuilder.should(rangeQuery); + boolQueryBuilder.should(wrapWithNestedQuery(fieldName, rangeQuery)); }); return rewriteDisjunctionQuery(boolQueryBuilder); @@ -200,24 +234,33 @@ public QueryBuilder visitFieldQuery(KqlBaseParser.FieldQueryContext ctx) { } if (fieldQuery != null) { - boolQueryBuilder.should(fieldQuery); + boolQueryBuilder.should(wrapWithNestedQuery(fieldName, fieldQuery)); } }); return rewriteDisjunctionQuery(boolQueryBuilder); } - private static boolean isAndQuery(KqlBaseParser.BooleanQueryContext ctx) { - return ctx.operator.getType() == KqlBaseParser.AND; + private static boolean isAndQuery(ParserRuleContext ctx) { + return switch (ctx) { + case KqlBaseParser.BooleanQueryContext booleanQueryCtx -> booleanQueryCtx.operator.getType() == KqlBaseParser.AND; + case KqlBaseParser.BooleanNestedQueryContext booleanNestedCtx -> booleanNestedCtx.operator.getType() == KqlBaseParser.AND; + default -> false; + }; } - private static boolean isOrQuery(KqlBaseParser.BooleanQueryContext ctx) { - return ctx.operator.getType() == KqlBaseParser.OR; + private static boolean isOrQuery(ParserRuleContext ctx) { + return switch (ctx) { + case KqlBaseParser.BooleanQueryContext booleanQueryCtx -> booleanQueryCtx.operator.getType() == KqlBaseParser.OR; + case KqlBaseParser.BooleanNestedQueryContext booleanNestedCtx -> booleanNestedCtx.operator.getType() == KqlBaseParser.OR; + default -> false; + }; } private void withFields(KqlBaseParser.FieldNameContext ctx, BiConsumer<String, MappedFieldType> fieldConsummer) { assert ctx != null : "Field ctx cannot be null"; String fieldNamePattern = extractText(ctx); + Set<String> fieldNames = kqlParsingContext.resolveFieldNames(fieldNamePattern); if (ctx.value.getType() == KqlBaseParser.QUOTED_STRING && Regex.isSimpleMatchPattern(fieldNamePattern)) { @@ -267,4 +310,14 @@ private BiFunction rangeOperation( default -> throw new IllegalArgumentException(format(null, "Invalid range operator {}\"", operator.getText())); }; } + + private QueryBuilder wrapWithNestedQuery(String fieldName, QueryBuilder query) { + String nestedPath = kqlParsingContext.nestedPath(fieldName); + + if (nestedPath == null || nestedPath.equals(kqlParsingContext.currentNestedPath())) { + return query; + } + + return wrapWithNestedQuery(nestedPath, QueryBuilders.nestedQuery(nestedPath, query, ScoreMode.None)); + } } diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp index 7af37d7e3c3b5..fbfe52afa4cd5 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp +++ 
b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp @@ -42,6 +42,9 @@ query simpleQuery notQuery nestedQuery +nestedSubQuery +nestedSimpleSubQuery +nestedParenthesizedQuery matchAllQuery parenthesizedQuery rangeQuery @@ -54,4 +57,4 @@ fieldName atn: -[4, 1, 16, 136, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 1, 0, 3, 0, 30, 8, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 40, 8, 1, 10, 1, 12, 1, 43, 9, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 3, 2, 53, 8, 2, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 3, 5, 66, 8, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 8, 4, 8, 79, 8, 8, 11, 8, 12, 8, 80, 1, 8, 3, 8, 84, 8, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 3, 10, 100, 8, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 3, 11, 107, 8, 11, 1, 12, 3, 12, 110, 8, 12, 1, 12, 4, 12, 113, 8, 12, 11, 12, 12, 12, 114, 1, 12, 3, 12, 118, 8, 12, 1, 12, 1, 12, 3, 12, 122, 8, 12, 1, 12, 1, 12, 3, 12, 126, 8, 12, 1, 12, 3, 12, 129, 8, 12, 1, 13, 1, 13, 1, 13, 3, 13, 134, 8, 13, 1, 13, 0, 1, 2, 14, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 0, 4, 1, 0, 2, 3, 1, 0, 6, 9, 2, 0, 14, 14, 16, 16, 1, 0, 2, 4, 145, 0, 29, 1, 0, 0, 0, 2, 33, 1, 0, 0, 0, 4, 52, 1, 0, 0, 0, 6, 54, 1, 0, 0, 0, 8, 57, 1, 0, 0, 0, 10, 65, 1, 0, 0, 0, 12, 69, 1, 0, 0, 0, 14, 73, 1, 0, 0, 0, 16, 83, 1, 0, 0, 0, 18, 85, 1, 0, 0, 0, 20, 99, 1, 0, 0, 0, 22, 106, 1, 0, 0, 0, 24, 128, 1, 0, 0, 0, 26, 133, 1, 0, 0, 0, 28, 30, 3, 2, 1, 0, 29, 28, 1, 0, 0, 0, 29, 30, 1, 0, 0, 0, 30, 31, 1, 0, 0, 0, 31, 32, 5, 0, 0, 1, 32, 1, 1, 0, 0, 0, 33, 34, 6, 1, -1, 0, 34, 35, 3, 4, 2, 0, 35, 41, 1, 0, 0, 0, 36, 37, 10, 2, 0, 0, 37, 38, 7, 0, 0, 0, 38, 40, 3, 2, 1, 2, 39, 36, 1, 0, 0, 0, 40, 43, 1, 0, 0, 0, 41, 39, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 41, 1, 0, 0, 0, 44, 53, 3, 6, 3, 0, 45, 53, 3, 8, 4, 0, 46, 53, 3, 12, 6, 0, 47, 53, 3, 10, 5, 0, 48, 53, 3, 18, 9, 0, 49, 53, 3, 14, 7, 0, 50, 53, 3, 20, 10, 0, 51, 53, 3, 22, 11, 0, 52, 44, 1, 0, 0, 0, 52, 45, 1, 0, 0, 0, 52, 46, 1, 0, 0, 0, 52, 47, 1, 0, 0, 0, 52, 48, 1, 0, 0, 0, 52, 49, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 52, 51, 1, 0, 0, 0, 53, 5, 1, 0, 0, 0, 54, 55, 5, 4, 0, 0, 55, 56, 3, 4, 2, 0, 56, 7, 1, 0, 0, 0, 57, 58, 3, 26, 13, 0, 58, 59, 5, 5, 0, 0, 59, 60, 5, 12, 0, 0, 60, 61, 3, 2, 1, 0, 61, 62, 5, 13, 0, 0, 62, 9, 1, 0, 0, 0, 63, 64, 5, 16, 0, 0, 64, 66, 5, 5, 0, 0, 65, 63, 1, 0, 0, 0, 65, 66, 1, 0, 0, 0, 66, 67, 1, 0, 0, 0, 67, 68, 5, 16, 0, 0, 68, 11, 1, 0, 0, 0, 69, 70, 5, 10, 0, 0, 70, 71, 3, 2, 1, 0, 71, 72, 5, 11, 0, 0, 72, 13, 1, 0, 0, 0, 73, 74, 3, 26, 13, 0, 74, 75, 7, 1, 0, 0, 75, 76, 3, 16, 8, 0, 76, 15, 1, 0, 0, 0, 77, 79, 7, 2, 0, 0, 78, 77, 1, 0, 0, 0, 79, 80, 1, 0, 0, 0, 80, 78, 1, 0, 0, 0, 80, 81, 1, 0, 0, 0, 81, 84, 1, 0, 0, 0, 82, 84, 5, 15, 0, 0, 83, 78, 1, 0, 0, 0, 83, 82, 1, 0, 0, 0, 84, 17, 1, 0, 0, 0, 85, 86, 3, 26, 13, 0, 86, 87, 5, 5, 0, 0, 87, 88, 5, 16, 0, 0, 88, 19, 1, 0, 0, 0, 89, 90, 3, 26, 13, 0, 90, 91, 5, 5, 0, 0, 91, 92, 3, 24, 12, 0, 92, 100, 1, 0, 0, 0, 93, 94, 3, 26, 13, 0, 94, 95, 5, 5, 0, 0, 95, 96, 5, 10, 0, 0, 96, 97, 3, 24, 12, 0, 97, 98, 5, 11, 0, 0, 98, 100, 1, 0, 0, 0, 99, 89, 1, 0, 0, 0, 99, 93, 1, 0, 0, 0, 100, 21, 1, 0, 0, 0, 101, 107, 3, 24, 12, 0, 102, 103, 5, 10, 0, 0, 103, 104, 3, 24, 12, 0, 104, 105, 5, 11, 0, 0, 105, 107, 1, 0, 0, 0, 106, 101, 1, 0, 
0, 0, 106, 102, 1, 0, 0, 0, 107, 23, 1, 0, 0, 0, 108, 110, 7, 3, 0, 0, 109, 108, 1, 0, 0, 0, 109, 110, 1, 0, 0, 0, 110, 112, 1, 0, 0, 0, 111, 113, 7, 2, 0, 0, 112, 111, 1, 0, 0, 0, 113, 114, 1, 0, 0, 0, 114, 112, 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 117, 1, 0, 0, 0, 116, 118, 7, 3, 0, 0, 117, 116, 1, 0, 0, 0, 117, 118, 1, 0, 0, 0, 118, 129, 1, 0, 0, 0, 119, 121, 7, 0, 0, 0, 120, 122, 7, 3, 0, 0, 121, 120, 1, 0, 0, 0, 121, 122, 1, 0, 0, 0, 122, 129, 1, 0, 0, 0, 123, 125, 5, 4, 0, 0, 124, 126, 7, 0, 0, 0, 125, 124, 1, 0, 0, 0, 125, 126, 1, 0, 0, 0, 126, 129, 1, 0, 0, 0, 127, 129, 5, 15, 0, 0, 128, 109, 1, 0, 0, 0, 128, 119, 1, 0, 0, 0, 128, 123, 1, 0, 0, 0, 128, 127, 1, 0, 0, 0, 129, 25, 1, 0, 0, 0, 130, 134, 5, 14, 0, 0, 131, 134, 5, 15, 0, 0, 132, 134, 5, 16, 0, 0, 133, 130, 1, 0, 0, 0, 133, 131, 1, 0, 0, 0, 133, 132, 1, 0, 0, 0, 134, 27, 1, 0, 0, 0, 15, 29, 41, 52, 65, 80, 83, 99, 106, 109, 114, 117, 121, 125, 128, 133] \ No newline at end of file +[4, 1, 16, 165, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 1, 0, 3, 0, 36, 8, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 46, 8, 1, 10, 1, 12, 1, 49, 9, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 3, 2, 59, 8, 2, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 76, 8, 5, 10, 5, 12, 5, 79, 9, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 1, 6, 3, 6, 87, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 3, 8, 95, 8, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 11, 4, 11, 108, 8, 11, 11, 11, 12, 11, 109, 1, 11, 3, 11, 113, 8, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 3, 13, 129, 8, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 3, 14, 136, 8, 14, 1, 15, 3, 15, 139, 8, 15, 1, 15, 4, 15, 142, 8, 15, 11, 15, 12, 15, 143, 1, 15, 3, 15, 147, 8, 15, 1, 15, 1, 15, 3, 15, 151, 8, 15, 1, 15, 1, 15, 3, 15, 155, 8, 15, 1, 15, 3, 15, 158, 8, 15, 1, 16, 1, 16, 1, 16, 3, 16, 163, 8, 16, 1, 16, 0, 2, 2, 10, 17, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 0, 4, 1, 0, 2, 3, 1, 0, 6, 9, 2, 0, 14, 14, 16, 16, 1, 0, 2, 4, 177, 0, 35, 1, 0, 0, 0, 2, 39, 1, 0, 0, 0, 4, 58, 1, 0, 0, 0, 6, 60, 1, 0, 0, 0, 8, 63, 1, 0, 0, 0, 10, 69, 1, 0, 0, 0, 12, 86, 1, 0, 0, 0, 14, 88, 1, 0, 0, 0, 16, 94, 1, 0, 0, 0, 18, 98, 1, 0, 0, 0, 20, 102, 1, 0, 0, 0, 22, 112, 1, 0, 0, 0, 24, 114, 1, 0, 0, 0, 26, 128, 1, 0, 0, 0, 28, 135, 1, 0, 0, 0, 30, 157, 1, 0, 0, 0, 32, 162, 1, 0, 0, 0, 34, 36, 3, 2, 1, 0, 35, 34, 1, 0, 0, 0, 35, 36, 1, 0, 0, 0, 36, 37, 1, 0, 0, 0, 37, 38, 5, 0, 0, 1, 38, 1, 1, 0, 0, 0, 39, 40, 6, 1, -1, 0, 40, 41, 3, 4, 2, 0, 41, 47, 1, 0, 0, 0, 42, 43, 10, 2, 0, 0, 43, 44, 7, 0, 0, 0, 44, 46, 3, 2, 1, 2, 45, 42, 1, 0, 0, 0, 46, 49, 1, 0, 0, 0, 47, 45, 1, 0, 0, 0, 47, 48, 1, 0, 0, 0, 48, 3, 1, 0, 0, 0, 49, 47, 1, 0, 0, 0, 50, 59, 3, 6, 3, 0, 51, 59, 3, 8, 4, 0, 52, 59, 3, 18, 9, 0, 53, 59, 3, 16, 8, 0, 54, 59, 3, 24, 12, 0, 55, 59, 3, 20, 10, 0, 56, 59, 3, 26, 13, 0, 57, 59, 3, 28, 14, 0, 58, 50, 1, 0, 0, 0, 58, 51, 1, 0, 0, 0, 58, 52, 1, 0, 0, 0, 58, 53, 1, 0, 0, 0, 58, 54, 1, 0, 0, 0, 58, 55, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 58, 57, 1, 0, 0, 0, 59, 5, 1, 0, 0, 0, 60, 61, 5, 4, 0, 0, 61, 62, 3, 4, 2, 0, 62, 7, 1, 0, 0, 0, 63, 64, 3, 32, 16, 0, 64, 65, 5, 5, 0, 0, 65, 66, 5, 12, 0, 0, 66, 67, 3, 10, 5, 0, 67, 68, 5, 13, 0, 0, 68, 9, 1, 0, 0, 0, 69, 70, 6, 5, -1, 
0, 70, 71, 3, 12, 6, 0, 71, 77, 1, 0, 0, 0, 72, 73, 10, 2, 0, 0, 73, 74, 7, 0, 0, 0, 74, 76, 3, 10, 5, 2, 75, 72, 1, 0, 0, 0, 76, 79, 1, 0, 0, 0, 77, 75, 1, 0, 0, 0, 77, 78, 1, 0, 0, 0, 78, 11, 1, 0, 0, 0, 79, 77, 1, 0, 0, 0, 80, 87, 3, 6, 3, 0, 81, 87, 3, 8, 4, 0, 82, 87, 3, 14, 7, 0, 83, 87, 3, 24, 12, 0, 84, 87, 3, 20, 10, 0, 85, 87, 3, 26, 13, 0, 86, 80, 1, 0, 0, 0, 86, 81, 1, 0, 0, 0, 86, 82, 1, 0, 0, 0, 86, 83, 1, 0, 0, 0, 86, 84, 1, 0, 0, 0, 86, 85, 1, 0, 0, 0, 87, 13, 1, 0, 0, 0, 88, 89, 5, 10, 0, 0, 89, 90, 3, 10, 5, 0, 90, 91, 5, 11, 0, 0, 91, 15, 1, 0, 0, 0, 92, 93, 5, 16, 0, 0, 93, 95, 5, 5, 0, 0, 94, 92, 1, 0, 0, 0, 94, 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 97, 5, 16, 0, 0, 97, 17, 1, 0, 0, 0, 98, 99, 5, 10, 0, 0, 99, 100, 3, 2, 1, 0, 100, 101, 5, 11, 0, 0, 101, 19, 1, 0, 0, 0, 102, 103, 3, 32, 16, 0, 103, 104, 7, 1, 0, 0, 104, 105, 3, 22, 11, 0, 105, 21, 1, 0, 0, 0, 106, 108, 7, 2, 0, 0, 107, 106, 1, 0, 0, 0, 108, 109, 1, 0, 0, 0, 109, 107, 1, 0, 0, 0, 109, 110, 1, 0, 0, 0, 110, 113, 1, 0, 0, 0, 111, 113, 5, 15, 0, 0, 112, 107, 1, 0, 0, 0, 112, 111, 1, 0, 0, 0, 113, 23, 1, 0, 0, 0, 114, 115, 3, 32, 16, 0, 115, 116, 5, 5, 0, 0, 116, 117, 5, 16, 0, 0, 117, 25, 1, 0, 0, 0, 118, 119, 3, 32, 16, 0, 119, 120, 5, 5, 0, 0, 120, 121, 3, 30, 15, 0, 121, 129, 1, 0, 0, 0, 122, 123, 3, 32, 16, 0, 123, 124, 5, 5, 0, 0, 124, 125, 5, 10, 0, 0, 125, 126, 3, 30, 15, 0, 126, 127, 5, 11, 0, 0, 127, 129, 1, 0, 0, 0, 128, 118, 1, 0, 0, 0, 128, 122, 1, 0, 0, 0, 129, 27, 1, 0, 0, 0, 130, 136, 3, 30, 15, 0, 131, 132, 5, 10, 0, 0, 132, 133, 3, 30, 15, 0, 133, 134, 5, 11, 0, 0, 134, 136, 1, 0, 0, 0, 135, 130, 1, 0, 0, 0, 135, 131, 1, 0, 0, 0, 136, 29, 1, 0, 0, 0, 137, 139, 7, 3, 0, 0, 138, 137, 1, 0, 0, 0, 138, 139, 1, 0, 0, 0, 139, 141, 1, 0, 0, 0, 140, 142, 7, 2, 0, 0, 141, 140, 1, 0, 0, 0, 142, 143, 1, 0, 0, 0, 143, 141, 1, 0, 0, 0, 143, 144, 1, 0, 0, 0, 144, 146, 1, 0, 0, 0, 145, 147, 7, 3, 0, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 158, 1, 0, 0, 0, 148, 150, 7, 0, 0, 0, 149, 151, 7, 3, 0, 0, 150, 149, 1, 0, 0, 0, 150, 151, 1, 0, 0, 0, 151, 158, 1, 0, 0, 0, 152, 154, 5, 4, 0, 0, 153, 155, 7, 0, 0, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 158, 1, 0, 0, 0, 156, 158, 5, 15, 0, 0, 157, 138, 1, 0, 0, 0, 157, 148, 1, 0, 0, 0, 157, 152, 1, 0, 0, 0, 157, 156, 1, 0, 0, 0, 158, 31, 1, 0, 0, 0, 159, 163, 5, 14, 0, 0, 160, 163, 5, 15, 0, 0, 161, 163, 5, 16, 0, 0, 162, 159, 1, 0, 0, 0, 162, 160, 1, 0, 0, 0, 162, 161, 1, 0, 0, 0, 163, 33, 1, 0, 0, 0, 17, 35, 47, 58, 77, 86, 94, 109, 112, 128, 135, 138, 143, 146, 150, 154, 157, 162] \ No newline at end of file diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java index e1015edcd4931..c3fc1281b6fd9 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java @@ -92,6 +92,54 @@ class KqlBaseBaseListener implements KqlBaseListener { *

<p>The default implementation does nothing.</p> */ @Override public void exitNestedQuery(KqlBaseParser.NestedQueryContext ctx) { } + /** + * {@inheritDoc} + * + * <p>The default implementation does nothing.</p> + */ + @Override public void enterBooleanNestedQuery(KqlBaseParser.BooleanNestedQueryContext ctx) { } + /** + * {@inheritDoc} + * + * <p>The default implementation does nothing.</p> + */ + @Override public void exitBooleanNestedQuery(KqlBaseParser.BooleanNestedQueryContext ctx) { } + /** + * {@inheritDoc} + * + * <p>The default implementation does nothing.</p> + */ + @Override public void enterDefaultNestedQuery(KqlBaseParser.DefaultNestedQueryContext ctx) { } + /** + * {@inheritDoc} + * + * <p>The default implementation does nothing.</p> + */ + @Override public void exitDefaultNestedQuery(KqlBaseParser.DefaultNestedQueryContext ctx) { } + /** + * {@inheritDoc} + * + * <p>The default implementation does nothing.</p> + */ + @Override public void enterNestedSimpleSubQuery(KqlBaseParser.NestedSimpleSubQueryContext ctx) { } + /** + * {@inheritDoc} + * + * <p>The default implementation does nothing.</p> + */ + @Override public void exitNestedSimpleSubQuery(KqlBaseParser.NestedSimpleSubQueryContext ctx) { } + /** + * {@inheritDoc} + * + * <p>The default implementation does nothing.</p> + */ + @Override public void enterNestedParenthesizedQuery(KqlBaseParser.NestedParenthesizedQueryContext ctx) { } + /** + * {@inheritDoc} + * + * <p>The default implementation does nothing.</p> + */ + @Override public void exitNestedParenthesizedQuery(KqlBaseParser.NestedParenthesizedQueryContext ctx) { } /** * {@inheritDoc} * diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java index 3973a647c8cd8..84c882c2e2bcf 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java @@ -62,6 +62,34 @@ class KqlBaseBaseVisitor<T> extends AbstractParseTreeVisitor<T> implements KqlBa * {@link #visitChildren} on {@code ctx}.</p> */ @Override public T visitNestedQuery(KqlBaseParser.NestedQueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + * <p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p> + */ + @Override public T visitBooleanNestedQuery(KqlBaseParser.BooleanNestedQueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + * <p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p> + */ + @Override public T visitDefaultNestedQuery(KqlBaseParser.DefaultNestedQueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + * <p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p> + */ + @Override public T visitNestedSimpleSubQuery(KqlBaseParser.NestedSimpleSubQueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + * <p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p>
    + */ + @Override public T visitNestedParenthesizedQuery(KqlBaseParser.NestedParenthesizedQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java index 49f2031208642..a44ecf1ecad23 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java @@ -79,6 +79,50 @@ interface KqlBaseListener extends ParseTreeListener { * @param ctx the parse tree */ void exitNestedQuery(KqlBaseParser.NestedQueryContext ctx); + /** + * Enter a parse tree produced by the {@code booleanNestedQuery} + * labeled alternative in {@link KqlBaseParser#nestedSubQuery}. + * @param ctx the parse tree + */ + void enterBooleanNestedQuery(KqlBaseParser.BooleanNestedQueryContext ctx); + /** + * Exit a parse tree produced by the {@code booleanNestedQuery} + * labeled alternative in {@link KqlBaseParser#nestedSubQuery}. + * @param ctx the parse tree + */ + void exitBooleanNestedQuery(KqlBaseParser.BooleanNestedQueryContext ctx); + /** + * Enter a parse tree produced by the {@code defaultNestedQuery} + * labeled alternative in {@link KqlBaseParser#nestedSubQuery}. + * @param ctx the parse tree + */ + void enterDefaultNestedQuery(KqlBaseParser.DefaultNestedQueryContext ctx); + /** + * Exit a parse tree produced by the {@code defaultNestedQuery} + * labeled alternative in {@link KqlBaseParser#nestedSubQuery}. + * @param ctx the parse tree + */ + void exitDefaultNestedQuery(KqlBaseParser.DefaultNestedQueryContext ctx); + /** + * Enter a parse tree produced by {@link KqlBaseParser#nestedSimpleSubQuery}. + * @param ctx the parse tree + */ + void enterNestedSimpleSubQuery(KqlBaseParser.NestedSimpleSubQueryContext ctx); + /** + * Exit a parse tree produced by {@link KqlBaseParser#nestedSimpleSubQuery}. + * @param ctx the parse tree + */ + void exitNestedSimpleSubQuery(KqlBaseParser.NestedSimpleSubQueryContext ctx); + /** + * Enter a parse tree produced by {@link KqlBaseParser#nestedParenthesizedQuery}. + * @param ctx the parse tree + */ + void enterNestedParenthesizedQuery(KqlBaseParser.NestedParenthesizedQueryContext ctx); + /** + * Exit a parse tree produced by {@link KqlBaseParser#nestedParenthesizedQuery}. + * @param ctx the parse tree + */ + void exitNestedParenthesizedQuery(KqlBaseParser.NestedParenthesizedQueryContext ctx); /** * Enter a parse tree produced by {@link KqlBaseParser#matchAllQuery}. 
* @param ctx the parse tree diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java index 118ac32aadd61..7e797b9edbb93 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java @@ -30,12 +30,15 @@ class KqlBaseParser extends Parser { RIGHT_CURLY_BRACKET=13, UNQUOTED_LITERAL=14, QUOTED_STRING=15, WILDCARD=16; public static final int RULE_topLevelQuery = 0, RULE_query = 1, RULE_simpleQuery = 2, RULE_notQuery = 3, - RULE_nestedQuery = 4, RULE_matchAllQuery = 5, RULE_parenthesizedQuery = 6, - RULE_rangeQuery = 7, RULE_rangeQueryValue = 8, RULE_existsQuery = 9, RULE_fieldQuery = 10, - RULE_fieldLessQuery = 11, RULE_fieldQueryValue = 12, RULE_fieldName = 13; + RULE_nestedQuery = 4, RULE_nestedSubQuery = 5, RULE_nestedSimpleSubQuery = 6, + RULE_nestedParenthesizedQuery = 7, RULE_matchAllQuery = 8, RULE_parenthesizedQuery = 9, + RULE_rangeQuery = 10, RULE_rangeQueryValue = 11, RULE_existsQuery = 12, + RULE_fieldQuery = 13, RULE_fieldLessQuery = 14, RULE_fieldQueryValue = 15, + RULE_fieldName = 16; private static String[] makeRuleNames() { return new String[] { - "topLevelQuery", "query", "simpleQuery", "notQuery", "nestedQuery", "matchAllQuery", + "topLevelQuery", "query", "simpleQuery", "notQuery", "nestedQuery", "nestedSubQuery", + "nestedSimpleSubQuery", "nestedParenthesizedQuery", "matchAllQuery", "parenthesizedQuery", "rangeQuery", "rangeQueryValue", "existsQuery", "fieldQuery", "fieldLessQuery", "fieldQueryValue", "fieldName" }; @@ -139,17 +142,17 @@ public final TopLevelQueryContext topLevelQuery() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(29); + setState(35); _errHandler.sync(this); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 115740L) != 0)) { { - setState(28); + setState(34); query(0); } } - setState(31); + setState(37); match(EOF); } } @@ -244,11 +247,11 @@ private QueryContext query(int _p) throws RecognitionException { _ctx = _localctx; _prevctx = _localctx; - setState(34); + setState(40); simpleQuery(); } _ctx.stop = _input.LT(-1); - setState(41); + setState(47); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,1,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { @@ -259,9 +262,9 @@ private QueryContext query(int _p) throws RecognitionException { { _localctx = new BooleanQueryContext(new QueryContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_query); - setState(36); + setState(42); if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); - setState(37); + setState(43); ((BooleanQueryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(_la==AND || _la==OR) ) { @@ -272,12 +275,12 @@ private QueryContext query(int _p) throws RecognitionException { _errHandler.reportMatch(this); consume(); } - setState(38); + setState(44); query(2); } } } - setState(43); + setState(49); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,1,_ctx); } @@ -343,62 +346,62 @@ public final SimpleQueryContext simpleQuery() throws RecognitionException { SimpleQueryContext _localctx = new SimpleQueryContext(_ctx, getState()); enterRule(_localctx, 4, RULE_simpleQuery); try { - setState(52); + setState(58); _errHandler.sync(this); switch ( 
getInterpreter().adaptivePredict(_input,2,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(44); + setState(50); notQuery(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(45); + setState(51); nestedQuery(); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(46); + setState(52); parenthesizedQuery(); } break; case 4: enterOuterAlt(_localctx, 4); { - setState(47); + setState(53); matchAllQuery(); } break; case 5: enterOuterAlt(_localctx, 5); { - setState(48); + setState(54); existsQuery(); } break; case 6: enterOuterAlt(_localctx, 6); { - setState(49); + setState(55); rangeQuery(); } break; case 7: enterOuterAlt(_localctx, 7); { - setState(50); + setState(56); fieldQuery(); } break; case 8: enterOuterAlt(_localctx, 8); { - setState(51); + setState(57); fieldLessQuery(); } break; @@ -447,9 +450,9 @@ public final NotQueryContext notQuery() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(54); + setState(60); match(NOT); - setState(55); + setState(61); ((NotQueryContext)_localctx).subQuery = simpleQuery(); } } @@ -471,8 +474,8 @@ public FieldNameContext fieldName() { } public TerminalNode COLON() { return getToken(KqlBaseParser.COLON, 0); } public TerminalNode LEFT_CURLY_BRACKET() { return getToken(KqlBaseParser.LEFT_CURLY_BRACKET, 0); } - public QueryContext query() { - return getRuleContext(QueryContext.class,0); + public NestedSubQueryContext nestedSubQuery() { + return getRuleContext(NestedSubQueryContext.class,0); } public TerminalNode RIGHT_CURLY_BRACKET() { return getToken(KqlBaseParser.RIGHT_CURLY_BRACKET, 0); } public NestedQueryContext(ParserRuleContext parent, int invokingState) { @@ -500,15 +503,15 @@ public final NestedQueryContext nestedQuery() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(57); + setState(63); fieldName(); - setState(58); + setState(64); match(COLON); - setState(59); + setState(65); match(LEFT_CURLY_BRACKET); - setState(60); - query(0); - setState(61); + setState(66); + nestedSubQuery(0); + setState(67); match(RIGHT_CURLY_BRACKET); } } @@ -523,6 +526,288 @@ public final NestedQueryContext nestedQuery() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") + public static class NestedSubQueryContext extends ParserRuleContext { + public NestedSubQueryContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_nestedSubQuery; } + + public NestedSubQueryContext() { } + public void copyFrom(NestedSubQueryContext ctx) { + super.copyFrom(ctx); + } + } + @SuppressWarnings("CheckReturnValue") + public static class BooleanNestedQueryContext extends NestedSubQueryContext { + public Token operator; + public List nestedSubQuery() { + return getRuleContexts(NestedSubQueryContext.class); + } + public NestedSubQueryContext nestedSubQuery(int i) { + return getRuleContext(NestedSubQueryContext.class,i); + } + public TerminalNode AND() { return getToken(KqlBaseParser.AND, 0); } + public TerminalNode OR() { return getToken(KqlBaseParser.OR, 0); } + public BooleanNestedQueryContext(NestedSubQueryContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterBooleanNestedQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitBooleanNestedQuery(this); + } + @Override 
+ public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitBooleanNestedQuery(this); + else return visitor.visitChildren(this); + } + } + @SuppressWarnings("CheckReturnValue") + public static class DefaultNestedQueryContext extends NestedSubQueryContext { + public NestedSimpleSubQueryContext nestedSimpleSubQuery() { + return getRuleContext(NestedSimpleSubQueryContext.class,0); + } + public DefaultNestedQueryContext(NestedSubQueryContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterDefaultNestedQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitDefaultNestedQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitDefaultNestedQuery(this); + else return visitor.visitChildren(this); + } + } + + public final NestedSubQueryContext nestedSubQuery() throws RecognitionException { + return nestedSubQuery(0); + } + + private NestedSubQueryContext nestedSubQuery(int _p) throws RecognitionException { + ParserRuleContext _parentctx = _ctx; + int _parentState = getState(); + NestedSubQueryContext _localctx = new NestedSubQueryContext(_ctx, _parentState); + NestedSubQueryContext _prevctx = _localctx; + int _startState = 10; + enterRecursionRule(_localctx, 10, RULE_nestedSubQuery, _p); + int _la; + try { + int _alt; + enterOuterAlt(_localctx, 1); + { + { + _localctx = new DefaultNestedQueryContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + + setState(70); + nestedSimpleSubQuery(); + } + _ctx.stop = _input.LT(-1); + setState(77); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,3,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + if ( _parseListeners!=null ) triggerExitRuleEvent(); + _prevctx = _localctx; + { + { + _localctx = new BooleanNestedQueryContext(new NestedSubQueryContext(_parentctx, _parentState)); + pushNewRecursionContext(_localctx, _startState, RULE_nestedSubQuery); + setState(72); + if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); + setState(73); + ((BooleanNestedQueryContext)_localctx).operator = _input.LT(1); + _la = _input.LA(1); + if ( !(_la==AND || _la==OR) ) { + ((BooleanNestedQueryContext)_localctx).operator = (Token)_errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + setState(74); + nestedSubQuery(2); + } + } + } + setState(79); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,3,_ctx); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + unrollRecursionContexts(_parentctx); + } + return _localctx; + } + + @SuppressWarnings("CheckReturnValue") + public static class NestedSimpleSubQueryContext extends ParserRuleContext { + public NotQueryContext notQuery() { + return getRuleContext(NotQueryContext.class,0); + } + public NestedQueryContext nestedQuery() { + return getRuleContext(NestedQueryContext.class,0); + } + public NestedParenthesizedQueryContext nestedParenthesizedQuery() { + return 
getRuleContext(NestedParenthesizedQueryContext.class,0); + } + public ExistsQueryContext existsQuery() { + return getRuleContext(ExistsQueryContext.class,0); + } + public RangeQueryContext rangeQuery() { + return getRuleContext(RangeQueryContext.class,0); + } + public FieldQueryContext fieldQuery() { + return getRuleContext(FieldQueryContext.class,0); + } + public NestedSimpleSubQueryContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_nestedSimpleSubQuery; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterNestedSimpleSubQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitNestedSimpleSubQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitNestedSimpleSubQuery(this); + else return visitor.visitChildren(this); + } + } + + public final NestedSimpleSubQueryContext nestedSimpleSubQuery() throws RecognitionException { + NestedSimpleSubQueryContext _localctx = new NestedSimpleSubQueryContext(_ctx, getState()); + enterRule(_localctx, 12, RULE_nestedSimpleSubQuery); + try { + setState(86); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,4,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(80); + notQuery(); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(81); + nestedQuery(); + } + break; + case 3: + enterOuterAlt(_localctx, 3); + { + setState(82); + nestedParenthesizedQuery(); + } + break; + case 4: + enterOuterAlt(_localctx, 4); + { + setState(83); + existsQuery(); + } + break; + case 5: + enterOuterAlt(_localctx, 5); + { + setState(84); + rangeQuery(); + } + break; + case 6: + enterOuterAlt(_localctx, 6); + { + setState(85); + fieldQuery(); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + @SuppressWarnings("CheckReturnValue") + public static class NestedParenthesizedQueryContext extends ParserRuleContext { + public TerminalNode LEFT_PARENTHESIS() { return getToken(KqlBaseParser.LEFT_PARENTHESIS, 0); } + public NestedSubQueryContext nestedSubQuery() { + return getRuleContext(NestedSubQueryContext.class,0); + } + public TerminalNode RIGHT_PARENTHESIS() { return getToken(KqlBaseParser.RIGHT_PARENTHESIS, 0); } + public NestedParenthesizedQueryContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_nestedParenthesizedQuery; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterNestedParenthesizedQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitNestedParenthesizedQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitNestedParenthesizedQuery(this); + else return visitor.visitChildren(this); + } + } + + public final NestedParenthesizedQueryContext nestedParenthesizedQuery() throws RecognitionException { 
+ NestedParenthesizedQueryContext _localctx = new NestedParenthesizedQueryContext(_ctx, getState()); + enterRule(_localctx, 14, RULE_nestedParenthesizedQuery); + try { + enterOuterAlt(_localctx, 1); + { + setState(88); + match(LEFT_PARENTHESIS); + setState(89); + nestedSubQuery(0); + setState(90); + match(RIGHT_PARENTHESIS); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + @SuppressWarnings("CheckReturnValue") public static class MatchAllQueryContext extends ParserRuleContext { public List WILDCARD() { return getTokens(KqlBaseParser.WILDCARD); } @@ -551,23 +836,23 @@ public T accept(ParseTreeVisitor visitor) { public final MatchAllQueryContext matchAllQuery() throws RecognitionException { MatchAllQueryContext _localctx = new MatchAllQueryContext(_ctx, getState()); - enterRule(_localctx, 10, RULE_matchAllQuery); + enterRule(_localctx, 16, RULE_matchAllQuery); try { enterOuterAlt(_localctx, 1); { - setState(65); + setState(94); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,3,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,5,_ctx) ) { case 1: { - setState(63); + setState(92); match(WILDCARD); - setState(64); + setState(93); match(COLON); } break; } - setState(67); + setState(96); match(WILDCARD); } } @@ -610,15 +895,15 @@ public T accept(ParseTreeVisitor visitor) { public final ParenthesizedQueryContext parenthesizedQuery() throws RecognitionException { ParenthesizedQueryContext _localctx = new ParenthesizedQueryContext(_ctx, getState()); - enterRule(_localctx, 12, RULE_parenthesizedQuery); + enterRule(_localctx, 18, RULE_parenthesizedQuery); try { enterOuterAlt(_localctx, 1); { - setState(69); + setState(98); match(LEFT_PARENTHESIS); - setState(70); + setState(99); query(0); - setState(71); + setState(100); match(RIGHT_PARENTHESIS); } } @@ -667,14 +952,14 @@ public T accept(ParseTreeVisitor visitor) { public final RangeQueryContext rangeQuery() throws RecognitionException { RangeQueryContext _localctx = new RangeQueryContext(_ctx, getState()); - enterRule(_localctx, 14, RULE_rangeQuery); + enterRule(_localctx, 20, RULE_rangeQuery); int _la; try { enterOuterAlt(_localctx, 1); { - setState(73); + setState(102); fieldName(); - setState(74); + setState(103); ((RangeQueryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 960L) != 0)) ) { @@ -685,7 +970,7 @@ public final RangeQueryContext rangeQuery() throws RecognitionException { _errHandler.reportMatch(this); consume(); } - setState(75); + setState(104); rangeQueryValue(); } } @@ -732,18 +1017,18 @@ public T accept(ParseTreeVisitor visitor) { public final RangeQueryValueContext rangeQueryValue() throws RecognitionException { RangeQueryValueContext _localctx = new RangeQueryValueContext(_ctx, getState()); - enterRule(_localctx, 16, RULE_rangeQueryValue); + enterRule(_localctx, 22, RULE_rangeQueryValue); int _la; try { int _alt; - setState(83); + setState(112); _errHandler.sync(this); switch (_input.LA(1)) { case UNQUOTED_LITERAL: case WILDCARD: enterOuterAlt(_localctx, 1); { - setState(78); + setState(107); _errHandler.sync(this); _alt = 1; do { @@ -751,7 +1036,7 @@ public final RangeQueryValueContext rangeQueryValue() throws RecognitionExceptio case 1: { { - setState(77); + setState(106); _la = _input.LA(1); if ( !(_la==UNQUOTED_LITERAL || _la==WILDCARD) ) { _errHandler.recoverInline(this); @@ 
-767,16 +1052,16 @@ public final RangeQueryValueContext rangeQueryValue() throws RecognitionExceptio default: throw new NoViableAltException(this); } - setState(80); + setState(109); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,4,_ctx); + _alt = getInterpreter().adaptivePredict(_input,6,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); } break; case QUOTED_STRING: enterOuterAlt(_localctx, 2); { - setState(82); + setState(111); match(QUOTED_STRING); } break; @@ -823,15 +1108,15 @@ public T accept(ParseTreeVisitor visitor) { public final ExistsQueryContext existsQuery() throws RecognitionException { ExistsQueryContext _localctx = new ExistsQueryContext(_ctx, getState()); - enterRule(_localctx, 18, RULE_existsQuery); + enterRule(_localctx, 24, RULE_existsQuery); try { enterOuterAlt(_localctx, 1); { - setState(85); + setState(114); fieldName(); - setState(86); + setState(115); match(COLON); - setState(87); + setState(116); match(WILDCARD); } } @@ -878,34 +1163,34 @@ public T accept(ParseTreeVisitor visitor) { public final FieldQueryContext fieldQuery() throws RecognitionException { FieldQueryContext _localctx = new FieldQueryContext(_ctx, getState()); - enterRule(_localctx, 20, RULE_fieldQuery); + enterRule(_localctx, 26, RULE_fieldQuery); try { - setState(99); + setState(128); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,6,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,8,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(89); + setState(118); fieldName(); - setState(90); + setState(119); match(COLON); - setState(91); + setState(120); fieldQueryValue(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(93); + setState(122); fieldName(); - setState(94); + setState(123); match(COLON); - setState(95); + setState(124); match(LEFT_PARENTHESIS); - setState(96); + setState(125); fieldQueryValue(); - setState(97); + setState(126); match(RIGHT_PARENTHESIS); } break; @@ -950,9 +1235,9 @@ public T accept(ParseTreeVisitor visitor) { public final FieldLessQueryContext fieldLessQuery() throws RecognitionException { FieldLessQueryContext _localctx = new FieldLessQueryContext(_ctx, getState()); - enterRule(_localctx, 22, RULE_fieldLessQuery); + enterRule(_localctx, 28, RULE_fieldLessQuery); try { - setState(106); + setState(135); _errHandler.sync(this); switch (_input.LA(1)) { case AND: @@ -963,18 +1248,18 @@ public final FieldLessQueryContext fieldLessQuery() throws RecognitionException case WILDCARD: enterOuterAlt(_localctx, 1); { - setState(101); + setState(130); fieldQueryValue(); } break; case LEFT_PARENTHESIS: enterOuterAlt(_localctx, 2); { - setState(102); + setState(131); match(LEFT_PARENTHESIS); - setState(103); + setState(132); fieldQueryValue(); - setState(104); + setState(133); match(RIGHT_PARENTHESIS); } break; @@ -1037,22 +1322,22 @@ public T accept(ParseTreeVisitor visitor) { public final FieldQueryValueContext fieldQueryValue() throws RecognitionException { FieldQueryValueContext _localctx = new FieldQueryValueContext(_ctx, getState()); - enterRule(_localctx, 24, RULE_fieldQueryValue); + enterRule(_localctx, 30, RULE_fieldQueryValue); int _la; try { int _alt; - setState(128); + setState(157); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,13,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,15,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(109); + setState(138); _errHandler.sync(this); _la = _input.LA(1); if ((((_la) & 
~0x3f) == 0 && ((1L << _la) & 28L) != 0)) { { - setState(108); + setState(137); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 28L) != 0)) ) { _errHandler.recoverInline(this); @@ -1065,7 +1350,7 @@ public final FieldQueryValueContext fieldQueryValue() throws RecognitionExceptio } } - setState(112); + setState(141); _errHandler.sync(this); _alt = 1; do { @@ -1073,7 +1358,7 @@ public final FieldQueryValueContext fieldQueryValue() throws RecognitionExceptio case 1: { { - setState(111); + setState(140); _la = _input.LA(1); if ( !(_la==UNQUOTED_LITERAL || _la==WILDCARD) ) { _errHandler.recoverInline(this); @@ -1089,16 +1374,16 @@ public final FieldQueryValueContext fieldQueryValue() throws RecognitionExceptio default: throw new NoViableAltException(this); } - setState(114); + setState(143); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,9,_ctx); + _alt = getInterpreter().adaptivePredict(_input,11,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); - setState(117); + setState(146); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,10,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) { case 1: { - setState(116); + setState(145); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 28L) != 0)) ) { _errHandler.recoverInline(this); @@ -1116,7 +1401,7 @@ public final FieldQueryValueContext fieldQueryValue() throws RecognitionExceptio case 2: enterOuterAlt(_localctx, 2); { - setState(119); + setState(148); _la = _input.LA(1); if ( !(_la==AND || _la==OR) ) { _errHandler.recoverInline(this); @@ -1126,12 +1411,12 @@ public final FieldQueryValueContext fieldQueryValue() throws RecognitionExceptio _errHandler.reportMatch(this); consume(); } - setState(121); + setState(150); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,11,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,13,_ctx) ) { case 1: { - setState(120); + setState(149); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 28L) != 0)) ) { _errHandler.recoverInline(this); @@ -1149,14 +1434,14 @@ public final FieldQueryValueContext fieldQueryValue() throws RecognitionExceptio case 3: enterOuterAlt(_localctx, 3); { - setState(123); + setState(152); match(NOT); - setState(125); + setState(154); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,14,_ctx) ) { case 1: { - setState(124); + setState(153); _la = _input.LA(1); if ( !(_la==AND || _la==OR) ) { _errHandler.recoverInline(this); @@ -1174,7 +1459,7 @@ public final FieldQueryValueContext fieldQueryValue() throws RecognitionExceptio case 4: enterOuterAlt(_localctx, 4); { - setState(127); + setState(156); match(QUOTED_STRING); } break; @@ -1218,29 +1503,29 @@ public T accept(ParseTreeVisitor visitor) { public final FieldNameContext fieldName() throws RecognitionException { FieldNameContext _localctx = new FieldNameContext(_ctx, getState()); - enterRule(_localctx, 26, RULE_fieldName); + enterRule(_localctx, 32, RULE_fieldName); try { - setState(133); + setState(162); _errHandler.sync(this); switch (_input.LA(1)) { case UNQUOTED_LITERAL: enterOuterAlt(_localctx, 1); { - setState(130); + setState(159); ((FieldNameContext)_localctx).value = match(UNQUOTED_LITERAL); } break; case QUOTED_STRING: enterOuterAlt(_localctx, 2); { - setState(131); + setState(160); ((FieldNameContext)_localctx).value = match(QUOTED_STRING); } break; case 
WILDCARD: enterOuterAlt(_localctx, 3); { - setState(132); + setState(161); ((FieldNameContext)_localctx).value = match(WILDCARD); } break; @@ -1263,6 +1548,8 @@ public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) { switch (ruleIndex) { case 1: return query_sempred((QueryContext)_localctx, predIndex); + case 5: + return nestedSubQuery_sempred((NestedSubQueryContext)_localctx, predIndex); } return true; } @@ -1273,87 +1560,117 @@ private boolean query_sempred(QueryContext _localctx, int predIndex) { } return true; } + private boolean nestedSubQuery_sempred(NestedSubQueryContext _localctx, int predIndex) { + switch (predIndex) { + case 1: + return precpred(_ctx, 2); + } + return true; + } public static final String _serializedATN = - "\u0004\u0001\u0010\u0088\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001"+ + "\u0004\u0001\u0010\u00a5\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001"+ "\u0002\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004\u0007\u0004"+ "\u0002\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007\u0007\u0007"+ "\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b\u0007\u000b"+ - "\u0002\f\u0007\f\u0002\r\u0007\r\u0001\u0000\u0003\u0000\u001e\b\u0000"+ - "\u0001\u0000\u0001\u0000\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ - "\u0001\u0001\u0001\u0001\u0005\u0001(\b\u0001\n\u0001\f\u0001+\t\u0001"+ - "\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002"+ - "\u0001\u0002\u0001\u0002\u0003\u00025\b\u0002\u0001\u0003\u0001\u0003"+ - "\u0001\u0003\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004"+ - "\u0001\u0004\u0001\u0005\u0001\u0005\u0003\u0005B\b\u0005\u0001\u0005"+ - "\u0001\u0005\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0007"+ - "\u0001\u0007\u0001\u0007\u0001\u0007\u0001\b\u0004\bO\b\b\u000b\b\f\b"+ - "P\u0001\b\u0003\bT\b\b\u0001\t\u0001\t\u0001\t\u0001\t\u0001\n\u0001\n"+ - "\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0003"+ - "\nd\b\n\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b\u0003"+ - "\u000bk\b\u000b\u0001\f\u0003\fn\b\f\u0001\f\u0004\fq\b\f\u000b\f\f\f"+ - "r\u0001\f\u0003\fv\b\f\u0001\f\u0001\f\u0003\fz\b\f\u0001\f\u0001\f\u0003"+ - "\f~\b\f\u0001\f\u0003\f\u0081\b\f\u0001\r\u0001\r\u0001\r\u0003\r\u0086"+ - "\b\r\u0001\r\u0000\u0001\u0002\u000e\u0000\u0002\u0004\u0006\b\n\f\u000e"+ - "\u0010\u0012\u0014\u0016\u0018\u001a\u0000\u0004\u0001\u0000\u0002\u0003"+ - "\u0001\u0000\u0006\t\u0002\u0000\u000e\u000e\u0010\u0010\u0001\u0000\u0002"+ - "\u0004\u0091\u0000\u001d\u0001\u0000\u0000\u0000\u0002!\u0001\u0000\u0000"+ - "\u0000\u00044\u0001\u0000\u0000\u0000\u00066\u0001\u0000\u0000\u0000\b"+ - "9\u0001\u0000\u0000\u0000\nA\u0001\u0000\u0000\u0000\fE\u0001\u0000\u0000"+ - "\u0000\u000eI\u0001\u0000\u0000\u0000\u0010S\u0001\u0000\u0000\u0000\u0012"+ - "U\u0001\u0000\u0000\u0000\u0014c\u0001\u0000\u0000\u0000\u0016j\u0001"+ - "\u0000\u0000\u0000\u0018\u0080\u0001\u0000\u0000\u0000\u001a\u0085\u0001"+ - "\u0000\u0000\u0000\u001c\u001e\u0003\u0002\u0001\u0000\u001d\u001c\u0001"+ - "\u0000\u0000\u0000\u001d\u001e\u0001\u0000\u0000\u0000\u001e\u001f\u0001"+ - "\u0000\u0000\u0000\u001f \u0005\u0000\u0000\u0001 \u0001\u0001\u0000\u0000"+ - "\u0000!\"\u0006\u0001\uffff\uffff\u0000\"#\u0003\u0004\u0002\u0000#)\u0001"+ - "\u0000\u0000\u0000$%\n\u0002\u0000\u0000%&\u0007\u0000\u0000\u0000&(\u0003"+ - "\u0002\u0001\u0002\'$\u0001\u0000\u0000\u0000(+\u0001\u0000\u0000\u0000"+ - ")\'\u0001\u0000\u0000\u0000)*\u0001\u0000\u0000\u0000*\u0003\u0001\u0000"+ - 
"\u0000\u0000+)\u0001\u0000\u0000\u0000,5\u0003\u0006\u0003\u0000-5\u0003"+ - "\b\u0004\u0000.5\u0003\f\u0006\u0000/5\u0003\n\u0005\u000005\u0003\u0012"+ - "\t\u000015\u0003\u000e\u0007\u000025\u0003\u0014\n\u000035\u0003\u0016"+ - "\u000b\u00004,\u0001\u0000\u0000\u00004-\u0001\u0000\u0000\u00004.\u0001"+ - "\u0000\u0000\u00004/\u0001\u0000\u0000\u000040\u0001\u0000\u0000\u0000"+ - "41\u0001\u0000\u0000\u000042\u0001\u0000\u0000\u000043\u0001\u0000\u0000"+ - "\u00005\u0005\u0001\u0000\u0000\u000067\u0005\u0004\u0000\u000078\u0003"+ - "\u0004\u0002\u00008\u0007\u0001\u0000\u0000\u00009:\u0003\u001a\r\u0000"+ - ":;\u0005\u0005\u0000\u0000;<\u0005\f\u0000\u0000<=\u0003\u0002\u0001\u0000"+ - "=>\u0005\r\u0000\u0000>\t\u0001\u0000\u0000\u0000?@\u0005\u0010\u0000"+ - "\u0000@B\u0005\u0005\u0000\u0000A?\u0001\u0000\u0000\u0000AB\u0001\u0000"+ - "\u0000\u0000BC\u0001\u0000\u0000\u0000CD\u0005\u0010\u0000\u0000D\u000b"+ - "\u0001\u0000\u0000\u0000EF\u0005\n\u0000\u0000FG\u0003\u0002\u0001\u0000"+ - "GH\u0005\u000b\u0000\u0000H\r\u0001\u0000\u0000\u0000IJ\u0003\u001a\r"+ - "\u0000JK\u0007\u0001\u0000\u0000KL\u0003\u0010\b\u0000L\u000f\u0001\u0000"+ - "\u0000\u0000MO\u0007\u0002\u0000\u0000NM\u0001\u0000\u0000\u0000OP\u0001"+ - "\u0000\u0000\u0000PN\u0001\u0000\u0000\u0000PQ\u0001\u0000\u0000\u0000"+ - "QT\u0001\u0000\u0000\u0000RT\u0005\u000f\u0000\u0000SN\u0001\u0000\u0000"+ - "\u0000SR\u0001\u0000\u0000\u0000T\u0011\u0001\u0000\u0000\u0000UV\u0003"+ - "\u001a\r\u0000VW\u0005\u0005\u0000\u0000WX\u0005\u0010\u0000\u0000X\u0013"+ - "\u0001\u0000\u0000\u0000YZ\u0003\u001a\r\u0000Z[\u0005\u0005\u0000\u0000"+ - "[\\\u0003\u0018\f\u0000\\d\u0001\u0000\u0000\u0000]^\u0003\u001a\r\u0000"+ - "^_\u0005\u0005\u0000\u0000_`\u0005\n\u0000\u0000`a\u0003\u0018\f\u0000"+ - "ab\u0005\u000b\u0000\u0000bd\u0001\u0000\u0000\u0000cY\u0001\u0000\u0000"+ - "\u0000c]\u0001\u0000\u0000\u0000d\u0015\u0001\u0000\u0000\u0000ek\u0003"+ - "\u0018\f\u0000fg\u0005\n\u0000\u0000gh\u0003\u0018\f\u0000hi\u0005\u000b"+ - "\u0000\u0000ik\u0001\u0000\u0000\u0000je\u0001\u0000\u0000\u0000jf\u0001"+ - "\u0000\u0000\u0000k\u0017\u0001\u0000\u0000\u0000ln\u0007\u0003\u0000"+ - "\u0000ml\u0001\u0000\u0000\u0000mn\u0001\u0000\u0000\u0000np\u0001\u0000"+ - "\u0000\u0000oq\u0007\u0002\u0000\u0000po\u0001\u0000\u0000\u0000qr\u0001"+ - "\u0000\u0000\u0000rp\u0001\u0000\u0000\u0000rs\u0001\u0000\u0000\u0000"+ - "su\u0001\u0000\u0000\u0000tv\u0007\u0003\u0000\u0000ut\u0001\u0000\u0000"+ - "\u0000uv\u0001\u0000\u0000\u0000v\u0081\u0001\u0000\u0000\u0000wy\u0007"+ - "\u0000\u0000\u0000xz\u0007\u0003\u0000\u0000yx\u0001\u0000\u0000\u0000"+ - "yz\u0001\u0000\u0000\u0000z\u0081\u0001\u0000\u0000\u0000{}\u0005\u0004"+ - "\u0000\u0000|~\u0007\u0000\u0000\u0000}|\u0001\u0000\u0000\u0000}~\u0001"+ - "\u0000\u0000\u0000~\u0081\u0001\u0000\u0000\u0000\u007f\u0081\u0005\u000f"+ - "\u0000\u0000\u0080m\u0001\u0000\u0000\u0000\u0080w\u0001\u0000\u0000\u0000"+ - "\u0080{\u0001\u0000\u0000\u0000\u0080\u007f\u0001\u0000\u0000\u0000\u0081"+ - "\u0019\u0001\u0000\u0000\u0000\u0082\u0086\u0005\u000e\u0000\u0000\u0083"+ - "\u0086\u0005\u000f\u0000\u0000\u0084\u0086\u0005\u0010\u0000\u0000\u0085"+ - "\u0082\u0001\u0000\u0000\u0000\u0085\u0083\u0001\u0000\u0000\u0000\u0085"+ - "\u0084\u0001\u0000\u0000\u0000\u0086\u001b\u0001\u0000\u0000\u0000\u000f"+ - "\u001d)4APScjmruy}\u0080\u0085"; + "\u0002\f\u0007\f\u0002\r\u0007\r\u0002\u000e\u0007\u000e\u0002\u000f\u0007"+ + "\u000f\u0002\u0010\u0007\u0010\u0001\u0000\u0003\u0000$\b\u0000\u0001"+ + 
"\u0000\u0001\u0000\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ + "\u0001\u0001\u0001\u0005\u0001.\b\u0001\n\u0001\f\u00011\t\u0001\u0001"+ + "\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001"+ + "\u0002\u0001\u0002\u0003\u0002;\b\u0002\u0001\u0003\u0001\u0003\u0001"+ + "\u0003\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001"+ + "\u0004\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001"+ + "\u0005\u0005\u0005L\b\u0005\n\u0005\f\u0005O\t\u0005\u0001\u0006\u0001"+ + "\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0006\u0003\u0006W\b"+ + "\u0006\u0001\u0007\u0001\u0007\u0001\u0007\u0001\u0007\u0001\b\u0001\b"+ + "\u0003\b_\b\b\u0001\b\u0001\b\u0001\t\u0001\t\u0001\t\u0001\t\u0001\n"+ + "\u0001\n\u0001\n\u0001\n\u0001\u000b\u0004\u000bl\b\u000b\u000b\u000b"+ + "\f\u000bm\u0001\u000b\u0003\u000bq\b\u000b\u0001\f\u0001\f\u0001\f\u0001"+ + "\f\u0001\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001\r\u0001"+ + "\r\u0001\r\u0003\r\u0081\b\r\u0001\u000e\u0001\u000e\u0001\u000e\u0001"+ + "\u000e\u0001\u000e\u0003\u000e\u0088\b\u000e\u0001\u000f\u0003\u000f\u008b"+ + "\b\u000f\u0001\u000f\u0004\u000f\u008e\b\u000f\u000b\u000f\f\u000f\u008f"+ + "\u0001\u000f\u0003\u000f\u0093\b\u000f\u0001\u000f\u0001\u000f\u0003\u000f"+ + "\u0097\b\u000f\u0001\u000f\u0001\u000f\u0003\u000f\u009b\b\u000f\u0001"+ + "\u000f\u0003\u000f\u009e\b\u000f\u0001\u0010\u0001\u0010\u0001\u0010\u0003"+ + "\u0010\u00a3\b\u0010\u0001\u0010\u0000\u0002\u0002\n\u0011\u0000\u0002"+ + "\u0004\u0006\b\n\f\u000e\u0010\u0012\u0014\u0016\u0018\u001a\u001c\u001e"+ + " \u0000\u0004\u0001\u0000\u0002\u0003\u0001\u0000\u0006\t\u0002\u0000"+ + "\u000e\u000e\u0010\u0010\u0001\u0000\u0002\u0004\u00b1\u0000#\u0001\u0000"+ + "\u0000\u0000\u0002\'\u0001\u0000\u0000\u0000\u0004:\u0001\u0000\u0000"+ + "\u0000\u0006<\u0001\u0000\u0000\u0000\b?\u0001\u0000\u0000\u0000\nE\u0001"+ + "\u0000\u0000\u0000\fV\u0001\u0000\u0000\u0000\u000eX\u0001\u0000\u0000"+ + "\u0000\u0010^\u0001\u0000\u0000\u0000\u0012b\u0001\u0000\u0000\u0000\u0014"+ + "f\u0001\u0000\u0000\u0000\u0016p\u0001\u0000\u0000\u0000\u0018r\u0001"+ + "\u0000\u0000\u0000\u001a\u0080\u0001\u0000\u0000\u0000\u001c\u0087\u0001"+ + "\u0000\u0000\u0000\u001e\u009d\u0001\u0000\u0000\u0000 \u00a2\u0001\u0000"+ + "\u0000\u0000\"$\u0003\u0002\u0001\u0000#\"\u0001\u0000\u0000\u0000#$\u0001"+ + "\u0000\u0000\u0000$%\u0001\u0000\u0000\u0000%&\u0005\u0000\u0000\u0001"+ + "&\u0001\u0001\u0000\u0000\u0000\'(\u0006\u0001\uffff\uffff\u0000()\u0003"+ + "\u0004\u0002\u0000)/\u0001\u0000\u0000\u0000*+\n\u0002\u0000\u0000+,\u0007"+ + "\u0000\u0000\u0000,.\u0003\u0002\u0001\u0002-*\u0001\u0000\u0000\u0000"+ + ".1\u0001\u0000\u0000\u0000/-\u0001\u0000\u0000\u0000/0\u0001\u0000\u0000"+ + "\u00000\u0003\u0001\u0000\u0000\u00001/\u0001\u0000\u0000\u00002;\u0003"+ + "\u0006\u0003\u00003;\u0003\b\u0004\u00004;\u0003\u0012\t\u00005;\u0003"+ + "\u0010\b\u00006;\u0003\u0018\f\u00007;\u0003\u0014\n\u00008;\u0003\u001a"+ + "\r\u00009;\u0003\u001c\u000e\u0000:2\u0001\u0000\u0000\u0000:3\u0001\u0000"+ + "\u0000\u0000:4\u0001\u0000\u0000\u0000:5\u0001\u0000\u0000\u0000:6\u0001"+ + "\u0000\u0000\u0000:7\u0001\u0000\u0000\u0000:8\u0001\u0000\u0000\u0000"+ + ":9\u0001\u0000\u0000\u0000;\u0005\u0001\u0000\u0000\u0000<=\u0005\u0004"+ + "\u0000\u0000=>\u0003\u0004\u0002\u0000>\u0007\u0001\u0000\u0000\u0000"+ + "?@\u0003 \u0010\u0000@A\u0005\u0005\u0000\u0000AB\u0005\f\u0000\u0000"+ + "BC\u0003\n\u0005\u0000CD\u0005\r\u0000\u0000D\t\u0001\u0000\u0000\u0000"+ + 
"EF\u0006\u0005\uffff\uffff\u0000FG\u0003\f\u0006\u0000GM\u0001\u0000\u0000"+ + "\u0000HI\n\u0002\u0000\u0000IJ\u0007\u0000\u0000\u0000JL\u0003\n\u0005"+ + "\u0002KH\u0001\u0000\u0000\u0000LO\u0001\u0000\u0000\u0000MK\u0001\u0000"+ + "\u0000\u0000MN\u0001\u0000\u0000\u0000N\u000b\u0001\u0000\u0000\u0000"+ + "OM\u0001\u0000\u0000\u0000PW\u0003\u0006\u0003\u0000QW\u0003\b\u0004\u0000"+ + "RW\u0003\u000e\u0007\u0000SW\u0003\u0018\f\u0000TW\u0003\u0014\n\u0000"+ + "UW\u0003\u001a\r\u0000VP\u0001\u0000\u0000\u0000VQ\u0001\u0000\u0000\u0000"+ + "VR\u0001\u0000\u0000\u0000VS\u0001\u0000\u0000\u0000VT\u0001\u0000\u0000"+ + "\u0000VU\u0001\u0000\u0000\u0000W\r\u0001\u0000\u0000\u0000XY\u0005\n"+ + "\u0000\u0000YZ\u0003\n\u0005\u0000Z[\u0005\u000b\u0000\u0000[\u000f\u0001"+ + "\u0000\u0000\u0000\\]\u0005\u0010\u0000\u0000]_\u0005\u0005\u0000\u0000"+ + "^\\\u0001\u0000\u0000\u0000^_\u0001\u0000\u0000\u0000_`\u0001\u0000\u0000"+ + "\u0000`a\u0005\u0010\u0000\u0000a\u0011\u0001\u0000\u0000\u0000bc\u0005"+ + "\n\u0000\u0000cd\u0003\u0002\u0001\u0000de\u0005\u000b\u0000\u0000e\u0013"+ + "\u0001\u0000\u0000\u0000fg\u0003 \u0010\u0000gh\u0007\u0001\u0000\u0000"+ + "hi\u0003\u0016\u000b\u0000i\u0015\u0001\u0000\u0000\u0000jl\u0007\u0002"+ + "\u0000\u0000kj\u0001\u0000\u0000\u0000lm\u0001\u0000\u0000\u0000mk\u0001"+ + "\u0000\u0000\u0000mn\u0001\u0000\u0000\u0000nq\u0001\u0000\u0000\u0000"+ + "oq\u0005\u000f\u0000\u0000pk\u0001\u0000\u0000\u0000po\u0001\u0000\u0000"+ + "\u0000q\u0017\u0001\u0000\u0000\u0000rs\u0003 \u0010\u0000st\u0005\u0005"+ + "\u0000\u0000tu\u0005\u0010\u0000\u0000u\u0019\u0001\u0000\u0000\u0000"+ + "vw\u0003 \u0010\u0000wx\u0005\u0005\u0000\u0000xy\u0003\u001e\u000f\u0000"+ + "y\u0081\u0001\u0000\u0000\u0000z{\u0003 \u0010\u0000{|\u0005\u0005\u0000"+ + "\u0000|}\u0005\n\u0000\u0000}~\u0003\u001e\u000f\u0000~\u007f\u0005\u000b"+ + "\u0000\u0000\u007f\u0081\u0001\u0000\u0000\u0000\u0080v\u0001\u0000\u0000"+ + "\u0000\u0080z\u0001\u0000\u0000\u0000\u0081\u001b\u0001\u0000\u0000\u0000"+ + "\u0082\u0088\u0003\u001e\u000f\u0000\u0083\u0084\u0005\n\u0000\u0000\u0084"+ + "\u0085\u0003\u001e\u000f\u0000\u0085\u0086\u0005\u000b\u0000\u0000\u0086"+ + "\u0088\u0001\u0000\u0000\u0000\u0087\u0082\u0001\u0000\u0000\u0000\u0087"+ + "\u0083\u0001\u0000\u0000\u0000\u0088\u001d\u0001\u0000\u0000\u0000\u0089"+ + "\u008b\u0007\u0003\u0000\u0000\u008a\u0089\u0001\u0000\u0000\u0000\u008a"+ + "\u008b\u0001\u0000\u0000\u0000\u008b\u008d\u0001\u0000\u0000\u0000\u008c"+ + "\u008e\u0007\u0002\u0000\u0000\u008d\u008c\u0001\u0000\u0000\u0000\u008e"+ + "\u008f\u0001\u0000\u0000\u0000\u008f\u008d\u0001\u0000\u0000\u0000\u008f"+ + "\u0090\u0001\u0000\u0000\u0000\u0090\u0092\u0001\u0000\u0000\u0000\u0091"+ + "\u0093\u0007\u0003\u0000\u0000\u0092\u0091\u0001\u0000\u0000\u0000\u0092"+ + "\u0093\u0001\u0000\u0000\u0000\u0093\u009e\u0001\u0000\u0000\u0000\u0094"+ + "\u0096\u0007\u0000\u0000\u0000\u0095\u0097\u0007\u0003\u0000\u0000\u0096"+ + "\u0095\u0001\u0000\u0000\u0000\u0096\u0097\u0001\u0000\u0000\u0000\u0097"+ + "\u009e\u0001\u0000\u0000\u0000\u0098\u009a\u0005\u0004\u0000\u0000\u0099"+ + "\u009b\u0007\u0000\u0000\u0000\u009a\u0099\u0001\u0000\u0000\u0000\u009a"+ + "\u009b\u0001\u0000\u0000\u0000\u009b\u009e\u0001\u0000\u0000\u0000\u009c"+ + "\u009e\u0005\u000f\u0000\u0000\u009d\u008a\u0001\u0000\u0000\u0000\u009d"+ + "\u0094\u0001\u0000\u0000\u0000\u009d\u0098\u0001\u0000\u0000\u0000\u009d"+ + "\u009c\u0001\u0000\u0000\u0000\u009e\u001f\u0001\u0000\u0000\u0000\u009f"+ + 
"\u00a3\u0005\u000e\u0000\u0000\u00a0\u00a3\u0005\u000f\u0000\u0000\u00a1"+ + "\u00a3\u0005\u0010\u0000\u0000\u00a2\u009f\u0001\u0000\u0000\u0000\u00a2"+ + "\u00a0\u0001\u0000\u0000\u0000\u00a2\u00a1\u0001\u0000\u0000\u0000\u00a3"+ + "!\u0001\u0000\u0000\u0000\u0011#/:MV^mp\u0080\u0087\u008a\u008f\u0092"+ + "\u0096\u009a\u009d\u00a2"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java index 18ef8f389195b..8200bfe0da25d 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java @@ -56,6 +56,32 @@ interface KqlBaseVisitor extends ParseTreeVisitor { * @return the visitor result */ T visitNestedQuery(KqlBaseParser.NestedQueryContext ctx); + /** + * Visit a parse tree produced by the {@code booleanNestedQuery} + * labeled alternative in {@link KqlBaseParser#nestedSubQuery}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitBooleanNestedQuery(KqlBaseParser.BooleanNestedQueryContext ctx); + /** + * Visit a parse tree produced by the {@code defaultNestedQuery} + * labeled alternative in {@link KqlBaseParser#nestedSubQuery}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitDefaultNestedQuery(KqlBaseParser.DefaultNestedQueryContext ctx); + /** + * Visit a parse tree produced by {@link KqlBaseParser#nestedSimpleSubQuery}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitNestedSimpleSubQuery(KqlBaseParser.NestedSimpleSubQueryContext ctx); + /** + * Visit a parse tree produced by {@link KqlBaseParser#nestedParenthesizedQuery}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitNestedParenthesizedQuery(KqlBaseParser.NestedParenthesizedQueryContext ctx); /** * Visit a parse tree produced by {@link KqlBaseParser#matchAllQuery}. 
* @param ctx the parse tree diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlParsingContext.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlParsingContext.java index 5f88080fb3ed4..30740833ee40e 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlParsingContext.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlParsingContext.java @@ -11,11 +11,18 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.NestedLookup; +import org.elasticsearch.index.mapper.NestedObjectMapper; import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.support.NestedScope; import java.time.ZoneId; import java.util.List; +import java.util.Map; import java.util.Set; +import java.util.function.Supplier; + +import static org.elasticsearch.common.Strings.format; public class KqlParsingContext { @@ -32,10 +39,11 @@ public static Builder builder(QueryRewriteContext queryRewriteContext) { return new Builder(queryRewriteContext); } - private QueryRewriteContext queryRewriteContext; + private final QueryRewriteContext queryRewriteContext; private final boolean caseInsensitive; private final ZoneId timeZone; private final String defaultField; + private final NestedScope nestedScope = new NestedScope(); public KqlParsingContext(QueryRewriteContext queryRewriteContext, boolean caseInsensitive, ZoneId timeZone, String defaultField) { this.queryRewriteContext = queryRewriteContext; @@ -56,9 +64,17 @@ public String defaultField() { return defaultField; } + public String nestedPath(String fieldName) { + return nestedLookup().getNestedParent(fieldName); + } + + public boolean isNestedField(String fieldName) { + return nestedMappers().containsKey(fullFieldName(fieldName)); + } + public Set resolveFieldNames(String fieldNamePattern) { assert fieldNamePattern != null && fieldNamePattern.isEmpty() == false : "fieldNamePattern cannot be null or empty"; - return queryRewriteContext.getMatchingFieldNames(fieldNamePattern); + return queryRewriteContext.getMatchingFieldNames(fullFieldName(fieldNamePattern)); } public Set resolveDefaultFieldNames() { @@ -89,6 +105,38 @@ public boolean isSearchableField(String fieldName) { return isSearchableField(fieldName, fieldType(fieldName)); } + public NestedScope nestedScope() { + return nestedScope; + } + + public T withNestedPath(String nestedFieldName, Supplier supplier) { + assert isNestedField(nestedFieldName); + nestedScope.nextLevel(nestedMappers().get(fullFieldName(nestedFieldName))); + T result = supplier.get(); + nestedScope.previousLevel(); + return result; + } + + public String currentNestedPath() { + return nestedScope().getObjectMapper() != null ? 
nestedScope().getObjectMapper().fullPath() : null; + } + + public String fullFieldName(String fieldName) { + if (nestedScope.getObjectMapper() == null) { + return fieldName; + } + + return format("%s.%s", nestedScope.getObjectMapper().fullPath(), fieldName); + } + + private NestedLookup nestedLookup() { + return queryRewriteContext.getMappingLookup().nestedLookup(); + } + + private Map nestedMappers() { + return nestedLookup().getNestedMappers(); + } + public static class Builder { private final QueryRewriteContext queryRewriteContext; private boolean caseInsensitive = true; diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/query/KqlQueryBuilder.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/query/KqlQueryBuilder.java index 5dff9126b6be4..e2817665d8f79 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/query/KqlQueryBuilder.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/query/KqlQueryBuilder.java @@ -17,6 +17,7 @@ import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; @@ -26,6 +27,7 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.kql.parser.KqlParser; import org.elasticsearch.xpack.kql.parser.KqlParsingContext; +import org.elasticsearch.xpack.kql.parser.KqlParsingException; import java.io.IOException; import java.time.ZoneId; @@ -37,9 +39,9 @@ public class KqlQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "kql"; public static final ParseField QUERY_FIELD = new ParseField("query"); - private static final ParseField CASE_INSENSITIVE_FIELD = new ParseField("case_insensitive"); - private static final ParseField TIME_ZONE_FIELD = new ParseField("time_zone"); - private static final ParseField DEFAULT_FIELD_FIELD = new ParseField("default_field"); + public static final ParseField CASE_INSENSITIVE_FIELD = new ParseField("case_insensitive"); + public static final ParseField TIME_ZONE_FIELD = new ParseField("time_zone"); + public static final ParseField DEFAULT_FIELD_FIELD = new ParseField("default_field"); private static final Logger log = LogManager.getLogger(KqlQueryBuilder.class); private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, a -> { @@ -151,12 +153,16 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep @Override protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException { - KqlParser parser = new KqlParser(); - QueryBuilder rewrittenQuery = parser.parseKqlQuery(query, createKqlParserContext(context)); + try { + KqlParser parser = new KqlParser(); + QueryBuilder rewrittenQuery = parser.parseKqlQuery(query, createKqlParserContext(context)); - log.trace(() -> Strings.format("KQL query %s translated to Query DSL: %s", query, Strings.toString(rewrittenQuery))); + log.trace(() -> Strings.format("KQL query %s translated to Query DSL: %s", query, Strings.toString(rewrittenQuery))); - return rewrittenQuery; + return rewrittenQuery; + } catch (KqlParsingException e) { + throw new QueryShardException(context, "Failed to parse KQL query [{}]", e, query); + } } @Override diff --git 
a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/AbstractKqlParserTestCase.java b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/AbstractKqlParserTestCase.java index 588e60bd4dd75..e6e4e20cfd3ca 100644 --- a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/AbstractKqlParserTestCase.java +++ b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/AbstractKqlParserTestCase.java @@ -46,11 +46,9 @@ import static org.hamcrest.Matchers.equalTo; public abstract class AbstractKqlParserTestCase extends AbstractBuilderTestCase { - protected static final String SUPPORTED_QUERY_FILE_PATH = "/supported-queries"; protected static final String UNSUPPORTED_QUERY_FILE_PATH = "/unsupported-queries"; protected static final Predicate BOOLEAN_QUERY_FILTER = (q) -> q.matches("(?i)[^{]*[^\\\\]*(NOT|AND|OR)[^}]*"); - protected static final String NESTED_FIELD_NAME = "mapped_nested"; @Override diff --git a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlNestedFieldQueryTests.java b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlNestedFieldQueryTests.java new file mode 100644 index 0000000000000..5660945fa0db3 --- /dev/null +++ b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlNestedFieldQueryTests.java @@ -0,0 +1,297 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.kql.parser; + +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.index.query.NestedQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.elasticsearch.common.Strings.format; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class KqlNestedFieldQueryTests extends AbstractKqlParserTestCase { + public void testInvalidNestedFieldName() { + for (String invalidFieldName : List.of(OBJECT_FIELD_NAME, TEXT_FIELD_NAME, "not_a_field", "mapped_nest*")) { + KqlParsingException e = assertThrows( + KqlParsingException.class, + () -> parseKqlQuery(format("%s : { %s: foo AND %s < 10 } ", invalidFieldName, TEXT_FIELD_NAME, INT_FIELD_NAME)) + ); + assertThat(e.getMessage(), Matchers.containsString(invalidFieldName)); + assertThat(e.getMessage(), Matchers.containsString("is not a valid nested field name")); + } + } + + public void testInlineNestedFieldMatchTextQuery() { + for (String fieldName : List.of(TEXT_FIELD_NAME, INT_FIELD_NAME)) { + { + // Querying a nested text subfield. 
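+ // The parsed query is expected to be a NestedQueryBuilder anchored at the parent nested path, wrapping the subfield query (asserted below).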
+ String nestedFieldName = format("%s.%s", NESTED_FIELD_NAME, fieldName); + String searchTerms = randomSearchTerms(); + String kqlQueryString = format("%s: %s", nestedFieldName, searchTerms); + + NestedQueryBuilder nestedQuery = asInstanceOf(NestedQueryBuilder.class, parseKqlQuery(kqlQueryString)); + + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + assertMatchQueryBuilder(nestedQuery.query(), nestedFieldName, searchTerms); + } + + { + // Several levels of nested fields. + String nestedFieldName = format("%s.%s.%s", NESTED_FIELD_NAME, NESTED_FIELD_NAME, fieldName); + String searchTerms = randomSearchTerms(); + String kqlQueryString = format("%s: %s", nestedFieldName, searchTerms); + + NestedQueryBuilder nestedQuery = asInstanceOf(NestedQueryBuilder.class, parseKqlQuery(kqlQueryString)); + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + + NestedQueryBuilder nestedSubQuery = asInstanceOf(NestedQueryBuilder.class, nestedQuery.query()); + assertThat(nestedSubQuery.path(), equalTo(format("%s.%s", NESTED_FIELD_NAME, NESTED_FIELD_NAME))); + + assertMatchQueryBuilder(nestedSubQuery.query(), nestedFieldName, searchTerms); + } + } + } + + public void testInlineNestedFieldMatchKeywordFieldQuery() { + { + // Querying a nested keyword subfield. + String nestedFieldName = format("%s.%s", NESTED_FIELD_NAME, KEYWORD_FIELD_NAME); + String searchTerms = randomSearchTerms(); + String kqlQueryString = format("%s: %s", nestedFieldName, searchTerms); + + NestedQueryBuilder nestedQuery = asInstanceOf(NestedQueryBuilder.class, parseKqlQuery(kqlQueryString)); + + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + assertTermQueryBuilder(nestedQuery.query(), nestedFieldName, searchTerms); + } + + { + // Several levels of nested fields. + String nestedFieldName = format("%s.%s.%s", NESTED_FIELD_NAME, NESTED_FIELD_NAME, KEYWORD_FIELD_NAME); + String searchTerms = randomSearchTerms(); + String kqlQueryString = format("%s: %s", nestedFieldName, searchTerms); + + NestedQueryBuilder nestedQuery = asInstanceOf(NestedQueryBuilder.class, parseKqlQuery(kqlQueryString)); + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + + NestedQueryBuilder nestedSubQuery = asInstanceOf(NestedQueryBuilder.class, nestedQuery.query()); + assertThat(nestedSubQuery.path(), equalTo(format("%s.%s", NESTED_FIELD_NAME, NESTED_FIELD_NAME))); + + assertTermQueryBuilder(nestedSubQuery.query(), nestedFieldName, searchTerms); + } + } + + public void testInlineNestedFieldRangeQuery() { + { + // Querying a nested integer subfield. + String nestedFieldName = format("%s.%s", NESTED_FIELD_NAME, INT_FIELD_NAME); + String operator = randomFrom(">", ">=", "<", "<="); + String kqlQueryString = format("%s %s %s", nestedFieldName, operator, randomDouble()); + + NestedQueryBuilder nestedQuery = asInstanceOf(NestedQueryBuilder.class, parseKqlQuery(kqlQueryString)); + + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + assertRangeQueryBuilder(nestedQuery.query(), nestedFieldName, rangeQueryBuilder -> {}); + } + + { + // Several levels of nested fields.
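+ // With multiple nesting levels, each level is expected to add its own wrapping NestedQueryBuilder; the assertions below unwrap them level by level.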
+ String nestedFieldName = format("%s.%s.%s", NESTED_FIELD_NAME, NESTED_FIELD_NAME, INT_FIELD_NAME); + String operator = randomFrom(">", ">=", "<", "<="); + String kqlQueryString = format("%s %s %s", nestedFieldName, operator, randomDouble()); + + NestedQueryBuilder nestedQuery = asInstanceOf(NestedQueryBuilder.class, parseKqlQuery(kqlQueryString)); + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + + NestedQueryBuilder nestedSubQuery = asInstanceOf(NestedQueryBuilder.class, nestedQuery.query()); + assertThat(nestedSubQuery.path(), equalTo(format("%s.%s", NESTED_FIELD_NAME, NESTED_FIELD_NAME))); + + assertRangeQueryBuilder(nestedSubQuery.query(), nestedFieldName, rangeQueryBuilder -> {}); + } + } + + public void testNestedQuerySyntax() { + // Single word - Keyword & text field + List.of(KEYWORD_FIELD_NAME, TEXT_FIELD_NAME) + .forEach( + fieldName -> assertThat( + parseKqlQuery(format("%s : { %s : %s }", NESTED_FIELD_NAME, fieldName, "foo")), + equalTo(parseKqlQuery(format("%s.%s : %s", NESTED_FIELD_NAME, fieldName, "foo"))) + ) + ); + + // Multiple words - Keyword & text field + List.of(KEYWORD_FIELD_NAME, TEXT_FIELD_NAME) + .forEach( + fieldName -> assertThat( + parseKqlQuery(format("%s : { %s : %s }", NESTED_FIELD_NAME, fieldName, "foo bar")), + equalTo(parseKqlQuery(format("%s.%s : %s", NESTED_FIELD_NAME, fieldName, "foo bar"))) + ) + ); + + // Range syntax + { + String operator = randomFrom("<", "<=", ">", ">="); + double rangeValue = randomDouble(); + assertThat( + parseKqlQuery(format("%s : { %s %s %s }", NESTED_FIELD_NAME, INT_FIELD_NAME, operator, rangeValue)), + equalTo(parseKqlQuery(format("%s.%s %s %s", NESTED_FIELD_NAME, INT_FIELD_NAME, operator, rangeValue))) + ); + } + + // Several levels of nesting + { + QueryBuilder inlineQuery = parseKqlQuery( + format("%s.%s.%s : %s", NESTED_FIELD_NAME, NESTED_FIELD_NAME, TEXT_FIELD_NAME, "foo bar") + ); + + assertThat( + parseKqlQuery(format("%s : { %s : { %s : %s } }", NESTED_FIELD_NAME, NESTED_FIELD_NAME, TEXT_FIELD_NAME, "foo bar")), + equalTo(inlineQuery) + ); + + assertThat( + parseKqlQuery(format("%s.%s : { %s : %s }", NESTED_FIELD_NAME, NESTED_FIELD_NAME, TEXT_FIELD_NAME, "foo bar")), + equalTo(inlineQuery) + ); + + assertThat( + parseKqlQuery(format("%s : { %s.%s : %s }", NESTED_FIELD_NAME, NESTED_FIELD_NAME, TEXT_FIELD_NAME, "foo bar")), + equalTo(inlineQuery) + ); + } + } + + public void testBooleanAndNestedQuerySyntax() { + NestedQueryBuilder nestedQuery = asInstanceOf( + NestedQueryBuilder.class, + parseKqlQuery( + format("%s: { %s : foo AND %s: bar AND %s > 3}", NESTED_FIELD_NAME, TEXT_FIELD_NAME, KEYWORD_FIELD_NAME, INT_FIELD_NAME) + ) + ); + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + + BoolQueryBuilder subQuery = asInstanceOf(BoolQueryBuilder.class, nestedQuery.query()); + assertThat(subQuery.should(), empty()); + assertThat(subQuery.filter(), empty()); + assertThat(subQuery.mustNot(), empty()); + assertThat(subQuery.must(), hasSize(3)); + assertMatchQueryBuilder( + subQuery.must().stream().filter(q -> q instanceof MatchQueryBuilder).findFirst().get(), + format("%s.%s", NESTED_FIELD_NAME, TEXT_FIELD_NAME), + "foo" + ); + assertTermQueryBuilder( + subQuery.must().stream().filter(q -> q instanceof TermQueryBuilder).findFirst().get(), + format("%s.%s", NESTED_FIELD_NAME, KEYWORD_FIELD_NAME), + "bar" + ); + assertRangeQueryBuilder( + subQuery.must().stream().filter(q -> q instanceof RangeQueryBuilder).findAny().get(), + format("%s.%s", NESTED_FIELD_NAME, INT_FIELD_NAME), + q -> {} + ); + } + +
public void testBooleanOrNestedQuerySyntax() { + NestedQueryBuilder nestedQuery = asInstanceOf( + NestedQueryBuilder.class, + parseKqlQuery( + format("%s: { %s : foo OR %s: bar OR %s > 3 }", NESTED_FIELD_NAME, TEXT_FIELD_NAME, KEYWORD_FIELD_NAME, INT_FIELD_NAME) + ) + ); + + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + + BoolQueryBuilder subQuery = asInstanceOf(BoolQueryBuilder.class, nestedQuery.query()); + assertThat(subQuery.must(), empty()); + assertThat(subQuery.filter(), empty()); + assertThat(subQuery.mustNot(), empty()); + assertThat(subQuery.should(), hasSize(3)); + assertMatchQueryBuilder( + subQuery.should().stream().filter(q -> q instanceof MatchQueryBuilder).findFirst().get(), + format("%s.%s", NESTED_FIELD_NAME, TEXT_FIELD_NAME), + "foo" + ); + assertTermQueryBuilder( + subQuery.should().stream().filter(q -> q instanceof TermQueryBuilder).findFirst().get(), + format("%s.%s", NESTED_FIELD_NAME, KEYWORD_FIELD_NAME), + "bar" + ); + assertRangeQueryBuilder( + subQuery.should().stream().filter(q -> q instanceof RangeQueryBuilder).findAny().get(), + format("%s.%s", NESTED_FIELD_NAME, INT_FIELD_NAME), + q -> {} + ); + } + + public void testBooleanNotNestedQuerySyntax() { + { + NestedQueryBuilder nestedQuery = asInstanceOf( + NestedQueryBuilder.class, + parseKqlQuery(format("%s: { NOT %s : foo }", NESTED_FIELD_NAME, TEXT_FIELD_NAME)) + ); + + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + + BoolQueryBuilder subQuery = asInstanceOf(BoolQueryBuilder.class, nestedQuery.query()); + assertThat(subQuery.must(), empty()); + assertThat(subQuery.filter(), empty()); + assertThat(subQuery.should(), empty()); + assertThat(subQuery.mustNot(), hasSize(1)); + assertMatchQueryBuilder(subQuery.mustNot().get(0), format("%s.%s", NESTED_FIELD_NAME, TEXT_FIELD_NAME), "foo"); + } + + { + NestedQueryBuilder nestedQuery = asInstanceOf( + NestedQueryBuilder.class, + parseKqlQuery(format("%s: { NOT %s : foo }", NESTED_FIELD_NAME, KEYWORD_FIELD_NAME)) + ); + + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + + BoolQueryBuilder subQuery = asInstanceOf(BoolQueryBuilder.class, nestedQuery.query()); + assertThat(subQuery.must(), empty()); + assertThat(subQuery.filter(), empty()); + assertThat(subQuery.should(), empty()); + assertThat(subQuery.mustNot(), hasSize(1)); + assertTermQueryBuilder(subQuery.mustNot().get(0), format("%s.%s", NESTED_FIELD_NAME, KEYWORD_FIELD_NAME), "foo"); + } + + { + NestedQueryBuilder nestedQuery = asInstanceOf( + NestedQueryBuilder.class, + parseKqlQuery(format("%s: { NOT %s < 3 }", NESTED_FIELD_NAME, INT_FIELD_NAME)) + ); + + assertThat(nestedQuery.path(), equalTo(NESTED_FIELD_NAME)); + + BoolQueryBuilder subQuery = asInstanceOf(BoolQueryBuilder.class, nestedQuery.query()); + assertThat(subQuery.must(), empty()); + assertThat(subQuery.filter(), empty()); + assertThat(subQuery.should(), empty()); + assertThat(subQuery.mustNot(), hasSize(1)); + assertRangeQueryBuilder(subQuery.mustNot().get(0), format("%s.%s", NESTED_FIELD_NAME, INT_FIELD_NAME), q -> {}); + } + } + + private static String randomSearchTerms() { + return Stream.generate(ESTestCase::randomIdentifier).limit(randomIntBetween(1, 10)).collect(Collectors.joining(" ")); + } +} diff --git a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserExistsQueryTests.java b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserExistsQueryTests.java index 45dd3312bbc03..6415cdb94ada7 100644 --- 
a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserExistsQueryTests.java +++ b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/parser/KqlParserExistsQueryTests.java @@ -10,7 +10,10 @@ import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.ExistsQueryBuilder; import org.elasticsearch.index.query.MatchNoneQueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.NestedQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; + +import java.util.regex.Pattern; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; @@ -35,11 +38,18 @@ public void testParseExistsQueryWithNoMatchingFields() { public void testParseExistsQueryWithASingleField() { for (String fieldName : searchableFields()) { - ExistsQueryBuilder parsedQuery = asInstanceOf(ExistsQueryBuilder.class, parseKqlQuery(kqlExistsQuery(fieldName))); - assertThat(parsedQuery.fieldName(), equalTo(fieldName)); + QueryBuilder parsedQuery = parseKqlQuery(kqlExistsQuery(fieldName)); // Using quotes to wrap the field name does not change the result. assertThat(parseKqlQuery(kqlExistsQuery("\"" + fieldName + "\"")), equalTo(parsedQuery)); + + long nestingLevel = Pattern.compile("[.]").splitAsStream(fieldName).takeWhile(s -> s.equals(NESTED_FIELD_NAME)).count(); + for (int i = 0; i < nestingLevel; i++) { + parsedQuery = asInstanceOf(NestedQueryBuilder.class, parsedQuery).query(); + } + + ExistsQueryBuilder existsQuery = asInstanceOf(ExistsQueryBuilder.class, parsedQuery); + assertThat(existsQuery.fieldName(), equalTo(fieldName)); } } @@ -53,7 +63,9 @@ public void testParseExistsQueryUsingWildcardFieldName() { assertThat( parsedQuery.should(), - containsInAnyOrder(searchableFields(fieldNamePattern).stream().map(QueryBuilders::existsQuery).toArray()) + containsInAnyOrder( + searchableFields(fieldNamePattern).stream().map(fieldName -> parseKqlQuery(kqlExistsQuery(fieldName))).toArray() + ) ); } diff --git a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/query/KqlQueryBuilderTests.java b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/query/KqlQueryBuilderTests.java index 2bc23c7d457dd..7323f7d6d1a4e 100644 --- a/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/query/KqlQueryBuilderTests.java +++ b/x-pack/plugin/kql/src/test/java/org/elasticsearch/xpack/kql/query/KqlQueryBuilderTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.kql.query; import org.apache.lucene.search.Query; +import org.elasticsearch.Build; import org.elasticsearch.core.Strings; import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -21,6 +22,7 @@ import org.elasticsearch.test.AbstractQueryTestCase; import org.elasticsearch.xpack.kql.KqlPlugin; import org.hamcrest.Matchers; +import org.junit.BeforeClass; import java.io.IOException; import java.util.Collection; @@ -34,6 +36,10 @@ import static org.hamcrest.Matchers.nullValue; public class KqlQueryBuilderTests extends AbstractQueryTestCase { + @BeforeClass + protected static void ensureSnapshotBuild() { + assumeTrue("requires snapshot builds", Build.current().isSnapshot()); + } @Override protected Collection> getPlugins() { diff --git a/x-pack/plugin/kql/src/test/resources/supported-queries b/x-pack/plugin/kql/src/test/resources/supported-queries index b659b1ae5b1db..f54a1d32fe3be 100644 --- a/x-pack/plugin/kql/src/test/resources/supported-queries +++ 
b/x-pack/plugin/kql/src/test/resources/supported-queries @@ -91,13 +91,6 @@ mapped_nested: { NOT(mapped_string:foo AND mapped_string_2:foo bar) } mapped_nested: { NOT mapped_string:foo AND NOT mapped_string_2:foo bar } mapped_nested: { (NOT mapped_string:foo) AND (NOT mapped_string_2:foo bar) } mapped_nested: { NOT(mapped_string:foo) AND NOT(mapped_string_2:foo bar) } -mapped_nested: { mapped_string:foo AND mapped_string_2:foo bar AND foo bar } -mapped_nested: { mapped_string:foo AND mapped_string_2:foo bar OR foo bar } -mapped_nested: { mapped_string:foo OR mapped_string_2:foo bar OR foo bar } -mapped_nested: { mapped_string:foo OR mapped_string_2:foo bar AND foo bar } -mapped_nested: { mapped_string:foo AND (mapped_string_2:foo bar OR foo bar) } -mapped_nested: { mapped_string:foo AND (mapped_string_2:foo bar OR foo bar) } -mapped_nested: { mapped_string:foo OR (mapped_string_2:foo bar OR foo bar) } mapped_nested: { mapped_str*:foo } mapped_nested: { mapped_nested : { mapped_string:foo AND mapped_int < 3 } AND mapped_string_2:foo bar } mapped_nested: { mapped_nested.mapped_string:foo AND mapped_string_2:foo bar } diff --git a/x-pack/plugin/kql/src/test/resources/unsupported-queries b/x-pack/plugin/kql/src/test/resources/unsupported-queries index 149bcf5bd2b5a..526ae94d6ac88 100644 --- a/x-pack/plugin/kql/src/test/resources/unsupported-queries +++ b/x-pack/plugin/kql/src/test/resources/unsupported-queries @@ -25,6 +25,20 @@ mapped_string:(foo (bar)) // Bad syntax for nested fields: mapped_nested { mapped_string: bar } +// Unknown nested field or not a nested field +not_nested : { mapped_string: bar } +mapped_string: { mapped_string: bar } + +// Nested queries cannot use fieldless subqueries +mapped_nested: { foo } +mapped_nested: { mapped_string:foo AND mapped_string_2:foo bar AND foo bar } +mapped_nested: { mapped_string:foo AND mapped_string_2:foo bar OR foo bar } +mapped_nested: { mapped_string:foo OR mapped_string_2:foo bar OR foo bar } +mapped_nested: { mapped_string:foo OR mapped_string_2:foo bar AND foo bar } +mapped_nested: { mapped_string:foo AND (mapped_string_2:foo bar OR foo bar) } +mapped_nested: { mapped_string:foo AND (mapped_string_2:foo bar OR foo bar) } +mapped_nested: { mapped_string:foo OR (mapped_string_2:foo bar OR foo bar) } + // Missing escape sequences: mapped_string: foo:bar mapped_string: (foo and bar) diff --git a/x-pack/plugin/kql/src/yamlRestTest/resources/rest-api-spec/test/kql/50_kql_nested_fields_query.yml b/x-pack/plugin/kql/src/yamlRestTest/resources/rest-api-spec/test/kql/50_kql_nested_fields_query.yml new file mode 100644 index 0000000000000..4ce6688e5222d --- /dev/null +++ b/x-pack/plugin/kql/src/yamlRestTest/resources/rest-api-spec/test/kql/50_kql_nested_fields_query.yml @@ -0,0 +1,218 @@ +setup: + - requires: + capabilities: + - method: POST + path: /_search + capabilities: [ kql_query ] + test_runner_features: [ capabilities, contains ] + reason: KQL query is not available + + - requires: + "test_runner_features": "contains" + + - do: + indices.create: + index: test-index + body: + mappings: + properties: + department: + type: keyword + staff: + type: integer + courses: + type: nested + properties: + name: + type: text + credits: + type: integer + sessions: + type: nested + properties: + semester: + type: keyword + students: + type: integer + + - do: + bulk: + index: test-index + refresh: true + body: | + { "index" : { "_id": "doc-1" } } + { "department": "compsci", "staff": 12, "courses": [ { "name": "Object Oriented Programming", "credits": 3,
"sessions": [ { "semester": "spr2021", "students": 37 }, { "semester": "fall2020", "students": 45} ] }, { "name": "Theory of Computation", "credits": 4, "sessions": [ { "semester": "spr2021", "students": 19 }, { "semester": "fall2020", "students": 14 } ] } ] } + { "index" : { "_id": "doc-42" } } + { "department": "math", "staff": 20, "courses": [ { "name": "Precalculus", "credits": 1, "sessions": [ { "semester": "spr2021", "students": 100 }, { "semester": "fall2020", "students": 134 } ] }, { "name": "Linear Algebra", "credits": 3, "sessions": [ { "semester": "spr2021", "students": 29 }, { "semester": "fall2020", "students": 23 } ] } ] } + +--- +"Inline syntax": + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses.name: object oriented programming" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-1" } + + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses.name: object oriented programming AND courses.credits > 3" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-1" } + + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses.name: object oriented programming OR courses.credits > 3" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-1" } + + +--- +"Nested field syntax": + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses : { name: object oriented programming }" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-1" } + + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses: { name: object oriented programming AND credits > 3 }" + } + } + } + - match: { hits.total: 0 } + + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses: { name: object oriented programming AND credits >= 3 }" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-1" } + + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses: { name: object oriented programming OR credits > 3 }" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-1" } + + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses: { NOT name: object oriented programming AND credits < 4 }" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-42" } + + +--- +"Several level of nesting field syntax": + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses: { name: object oriented programming AND sessions.semester: spr2021 }" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-1" } + + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses: { sessions : { semester: spr2021 AND students < 20 } }" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-1" } + + - do: + search: + index: test-index + rest_total_hits_as_int: true + body: > + { + "query": { + "kql": { + "query": "courses: { name: computation AND sessions : { semester: spr2021 AND 
students < 20 } }" + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "doc-1" } diff --git a/x-pack/plugin/logsdb/build.gradle b/x-pack/plugin/logsdb/build.gradle index 60578f832d153..1aef69e0e3fac 100644 --- a/x-pack/plugin/logsdb/build.gradle +++ b/x-pack/plugin/logsdb/build.gradle @@ -5,8 +5,6 @@ * 2.0. */ -import org.elasticsearch.gradle.internal.info.BuildParams - evaluationDependsOn(xpackModule('core')) apply plugin: 'elasticsearch.internal-es-plugin' diff --git a/x-pack/plugin/logsdb/qa/with-basic/build.gradle b/x-pack/plugin/logsdb/qa/with-basic/build.gradle index 2fdeed338e1c1..9729ac9c29cef 100644 --- a/x-pack/plugin/logsdb/qa/with-basic/build.gradle +++ b/x-pack/plugin/logsdb/qa/with-basic/build.gradle @@ -5,8 +5,6 @@ * 2.0. */ -import org.elasticsearch.gradle.internal.info.BuildParams - apply plugin: 'elasticsearch.internal-java-rest-test' dependencies { @@ -15,7 +13,7 @@ dependencies { tasks.named("javaRestTest").configure { // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) usesDefaultDistribution() } diff --git a/x-pack/plugin/logsdb/qa/with-custom-cutoff/build.gradle b/x-pack/plugin/logsdb/qa/with-custom-cutoff/build.gradle new file mode 100644 index 0000000000000..9729ac9c29cef --- /dev/null +++ b/x-pack/plugin/logsdb/qa/with-custom-cutoff/build.gradle @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +apply plugin: 'elasticsearch.internal-java-rest-test' + +dependencies { + javaRestTestImplementation(testArtifact(project(xpackModule('core')))) +} + +tasks.named("javaRestTest").configure { + // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC + buildParams.withFipsEnabledOnly(it) + + usesDefaultDistribution() +} diff --git a/x-pack/plugin/logsdb/qa/with-custom-cutoff/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java b/x-pack/plugin/logsdb/qa/with-custom-cutoff/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java new file mode 100644 index 0000000000000..3266e2e6e4757 --- /dev/null +++ b/x-pack/plugin/logsdb/qa/with-custom-cutoff/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + + package org.elasticsearch.xpack.logsdb; + + import org.elasticsearch.test.cluster.ElasticsearchCluster; + import org.elasticsearch.test.cluster.local.distribution.DistributionType; + import org.elasticsearch.test.rest.ESRestTestCase; + import org.hamcrest.Matchers; + import org.junit.ClassRule; + + import java.io.IOException; + import java.util.Map; + + public class LogsdbWithBasicRestIT extends ESRestTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .systemProperty("es.mapping.synthetic_source_fallback_to_stored_source.cutoff_date_restricted_override", "2027-12-31T23:59") + .setting("xpack.security.enabled", "false") + .setting("cluster.logsdb.enabled", "true") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public void testCustomCutoffDateUsage() throws IOException { + var response = getAsMap("/_xpack/usage"); + Map<?, ?> usage = (Map<?, ?>) response.get("logsdb"); + assertThat(usage, Matchers.hasEntry("available", true)); + assertThat(usage, Matchers.hasEntry("enabled", true)); + assertThat(usage, Matchers.hasEntry("indices_count", 0)); + assertThat(usage, Matchers.hasEntry("indices_with_synthetic_source", 0)); + assertThat(usage, Matchers.hasEntry("num_docs", 0)); + assertThat(usage, Matchers.hasEntry("size_in_bytes", 0)); + assertThat(usage, Matchers.hasEntry("has_custom_cutoff_date", true)); + } +} diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeCustomSettingsIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeCustomSettingsIT.java index f529b9fa1db96..99acbec04551e 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeCustomSettingsIT.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeCustomSettingsIT.java @@ -7,9 +7,11 @@ package org.elasticsearch.xpack.logsdb; +import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.junit.Before; @@ -112,8 +114,11 @@ public void testConfigureStoredSourceBeforeIndexCreation() throws IOException { }"""; assertOK(putComponentTemplate(client, "logs@custom", storedSourceMapping)); - assertOK(createDataStream(client, "logs-custom-dev")); - + Request request = new Request("PUT", "_data_stream/logs-custom-dev"); + if (SourceFieldMapper.onOrAfterDeprecateModeVersion(minimumIndexVersion())) { + request.setOptions(expectVersionSpecificWarnings(v -> v.current(SourceFieldMapper.DEPRECATION_WARNING))); + } + assertOK(client.performRequest(request)); var mapping = getMapping(client, getDataStreamBackingIndex(client, "logs-custom-dev", 0)); String sourceMode = (String) subObject("_source").apply(mapping).get("mode"); assertThat(sourceMode, equalTo("stored")); @@ -182,7 +187,11 @@ public void testConfigureStoredSourceWhenIndexIsCreated() throws IOException { }"""; assertOK(putComponentTemplate(client, "logs@custom", storedSourceMapping)); - assertOK(createDataStream(client, "logs-custom-dev")); + Request request = new Request("PUT", "_data_stream/logs-custom-dev"); + if
(SourceFieldMapper.onOrAfterDeprecateModeVersion(minimumIndexVersion())) { + request.setOptions(expectVersionSpecificWarnings(v -> v.current(SourceFieldMapper.DEPRECATION_WARNING))); + } + assertOK(client.performRequest(request)); var mapping = getMapping(client, getDataStreamBackingIndex(client, "logs-custom-dev", 0)); String sourceMode = (String) subObject("_source").apply(mapping).get("mode"); diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeRestTestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeRestTestIT.java index cc7f5bdb33871..0990592cef5e3 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeRestTestIT.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeRestTestIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.test.rest.ESRestTestCase; import java.io.IOException; @@ -35,6 +36,11 @@ protected static Response putComponentTemplate(final RestClient client, final St throws IOException { final Request request = new Request("PUT", "/_component_template/" + componentTemplate); request.setJsonEntity(contends); + if (isSyntheticSourceConfiguredInTemplate(contends) && SourceFieldMapper.onOrAfterDeprecateModeVersion(minimumIndexVersion())) { + request.setOptions( + expectVersionSpecificWarnings((VersionSensitiveWarningsHandler v) -> v.current(SourceFieldMapper.DEPRECATION_WARNING)) + ); + } return client.performRequest(request); } diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StandardVersusLogsIndexModeChallengeRestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StandardVersusLogsIndexModeChallengeRestIT.java index 8d7a813b206d8..e411f2f3f314d 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StandardVersusLogsIndexModeChallengeRestIT.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/qa/StandardVersusLogsIndexModeChallengeRestIT.java @@ -181,7 +181,7 @@ protected static void waitForLogs(RestClient client) throws Exception { } public void testMatchAllQuery() throws IOException { - int numberOfDocuments = ESTestCase.randomIntBetween(100, 200); + int numberOfDocuments = ESTestCase.randomIntBetween(20, 100); final List<XContentBuilder> documents = generateDocuments(numberOfDocuments); indexDocuments(documents); @@ -199,7 +199,7 @@ public void testMatchAllQuery() throws IOException { } public void testTermsQuery() throws IOException { - int numberOfDocuments = ESTestCase.randomIntBetween(100, 200); + int numberOfDocuments = ESTestCase.randomIntBetween(20, 100); final List<XContentBuilder> documents = generateDocuments(numberOfDocuments); indexDocuments(documents); @@ -217,7 +217,7 @@ public void testTermsQuery() throws IOException { } public void testHistogramAggregation() throws IOException { - int numberOfDocuments = ESTestCase.randomIntBetween(100, 200); + int numberOfDocuments = ESTestCase.randomIntBetween(20, 100); final List<XContentBuilder> documents = generateDocuments(numberOfDocuments); indexDocuments(documents); @@ -235,7 +235,7 @@ public void testHistogramAggregation() throws IOException { } public void testTermsAggregation() throws IOException { - int numberOfDocuments =
ESTestCase.randomIntBetween(100, 200); + int numberOfDocuments = ESTestCase.randomIntBetween(20, 100); final List<XContentBuilder> documents = generateDocuments(numberOfDocuments); indexDocuments(documents); @@ -253,7 +253,7 @@ public void testTermsAggregation() throws IOException { } public void testDateHistogramAggregation() throws IOException { - int numberOfDocuments = ESTestCase.randomIntBetween(100, 200); + int numberOfDocuments = ESTestCase.randomIntBetween(20, 100); final List<XContentBuilder> documents = generateDocuments(numberOfDocuments); indexDocuments(documents); @@ -271,7 +271,7 @@ public void testDateHistogramAggregation() throws IOException { } public void testEsqlSource() throws IOException { - int numberOfDocuments = ESTestCase.randomIntBetween(100, 200); + int numberOfDocuments = ESTestCase.randomIntBetween(20, 100); final List<XContentBuilder> documents = generateDocuments(numberOfDocuments); indexDocuments(documents); @@ -287,7 +287,7 @@ public void testEsqlSource() throws IOException { } public void testEsqlTermsAggregation() throws IOException { - int numberOfDocuments = ESTestCase.randomIntBetween(100, 200); + int numberOfDocuments = ESTestCase.randomIntBetween(20, 100); final List<XContentBuilder> documents = generateDocuments(numberOfDocuments); indexDocuments(documents); @@ -302,7 +302,7 @@ public void testEsqlTermsAggregation() throws IOException { } public void testEsqlTermsAggregationByMethod() throws IOException { - int numberOfDocuments = ESTestCase.randomIntBetween(100, 200); + int numberOfDocuments = ESTestCase.randomIntBetween(20, 100); final List<XContentBuilder> documents = generateDocuments(numberOfDocuments); indexDocuments(documents); diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java index 0eb0754985c94..904b00e6d0450 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java @@ -13,6 +13,8 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettingProvider; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.xpack.core.XPackPlugin; @@ -46,8 +48,14 @@ public LogsDBPlugin(Settings settings) { @Override public Collection<?> createComponents(PluginServices services) { - licenseService.setLicenseState(XPackPlugin.getSharedLicenseState()); + licenseService.setLicenseService(getLicenseService()); + licenseService.setLicenseState(getLicenseState()); var clusterSettings = services.clusterService().getClusterSettings(); + // The `cluster.logsdb.enabled` setting is registered by this plugin, but its value may be updated by other plugins + // before this plugin registers its settings update consumer below. This means we might miss updates that occurred earlier. + // To handle this, we explicitly fetch the current `cluster.logsdb.enabled` setting value from the cluster settings + // and update it, ensuring we capture any prior changes.
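            // --- Illustrative sketch, not part of the patch: the fetch-then-subscribe pattern the comment
            // above describes, usable with any dynamic cluster setting. The clusterSettings instance and the
            // onChange Consumer<Boolean> are assumed stand-ins for this plugin's actual wiring.
            Setting<Boolean> logsdbEnabled = Setting.boolSetting(
                "cluster.logsdb.enabled",
                false,
                Setting.Property.Dynamic,
                Setting.Property.NodeScope
            );
            onChange.accept(clusterSettings.get(logsdbEnabled));                // 1. capture the value already in effect
            clusterSettings.addSettingsUpdateConsumer(logsdbEnabled, onChange); // 2. subscribe to all future updates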
+ logsdbIndexModeSettingsProvider.updateClusterIndexModeLogsdbEnabled(clusterSettings.get(CLUSTER_LOGSDB_ENABLED)); clusterSettings.addSettingsUpdateConsumer(FALLBACK_SETTING, licenseService::setSyntheticSourceFallback); clusterSettings.addSettingsUpdateConsumer( CLUSTER_LOGSDB_ENABLED, @@ -62,10 +70,13 @@ public Collection<IndexSettingProvider> getAdditionalIndexSettingProviders(Index if (DiscoveryNode.isStateless(settings)) { return List.of(logsdbIndexModeSettingsProvider); } - return List.of( - new SyntheticSourceIndexSettingsProvider(licenseService, parameters.mapperServiceFactory(), logsdbIndexModeSettingsProvider), - logsdbIndexModeSettingsProvider + var syntheticSettingProvider = new SyntheticSourceIndexSettingsProvider( + licenseService, + parameters.mapperServiceFactory(), + logsdbIndexModeSettingsProvider, + () -> parameters.clusterService().state().nodes().getMinSupportedIndexVersion() ); + return List.of(syntheticSettingProvider, logsdbIndexModeSettingsProvider); } @Override @@ -80,4 +91,12 @@ public List<Setting<?>> getSettings() { actions.add(new ActionPlugin.ActionHandler<>(XPackInfoFeatureAction.LOGSDB, LogsDBInfoTransportAction.class)); return actions; } + + protected XPackLicenseState getLicenseState() { + return XPackPlugin.getSharedLicenseState(); + } + + protected LicenseService getLicenseService() { + return XPackPlugin.getSharedLicenseService(); + } } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBUsageTransportAction.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBUsageTransportAction.java index 62e1eef3e0e97..f4fa2a29d79a0 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBUsageTransportAction.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBUsageTransportAction.java @@ -77,6 +77,7 @@ protected void masterOperation( } } final boolean enabled = LogsDBPlugin.CLUSTER_LOGSDB_ENABLED.get(clusterService.getSettings()); + final boolean hasCustomCutoffDate = System.getProperty(SyntheticSourceLicenseService.CUTOFF_DATE_SYS_PROP_NAME) != null; if (featureService.clusterHasFeature(state, XPackFeatures.LOGSDB_TELMETRY_STATS)) { final DiscoveryNode[] nodes = state.nodes().getDataNodes().values().toArray(DiscoveryNode[]::new); final var statsRequest = new IndexModeStatsActionType.StatsRequest(nodes); @@ -91,13 +92,16 @@ protected void masterOperation( finalNumIndices, finalNumIndicesWithSyntheticSources, indexStats.numDocs(), - indexStats.numBytes() + indexStats.numBytes(), + hasCustomCutoffDate ) ); })); } else { listener.onResponse( - new XPackUsageFeatureResponse(new LogsDBFeatureSetUsage(true, enabled, numIndices, numIndicesWithSyntheticSources, 0L, 0L)) + new XPackUsageFeatureResponse( + new LogsDBFeatureSetUsage(true, enabled, numIndices, numIndicesWithSyntheticSources, 0L, 0L, hasCustomCutoffDate) + ) ); } } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java index e87f10ec19916..462bad4b19551 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java @@ -26,6 +26,7 @@ import java.io.IOException; import java.time.Instant; import java.util.List; +import java.util.function.Supplier; import static
org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_PATH; @@ -39,15 +40,18 @@ final class SyntheticSourceIndexSettingsProvider implements IndexSettingProvider private final SyntheticSourceLicenseService syntheticSourceLicenseService; private final CheckedFunction<IndexMetadata, MapperService, IOException> mapperServiceFactory; private final LogsdbIndexModeSettingsProvider logsdbIndexModeSettingsProvider; + private final Supplier<IndexVersion> createdIndexVersion; SyntheticSourceIndexSettingsProvider( SyntheticSourceLicenseService syntheticSourceLicenseService, CheckedFunction<IndexMetadata, MapperService, IOException> mapperServiceFactory, - LogsdbIndexModeSettingsProvider logsdbIndexModeSettingsProvider + LogsdbIndexModeSettingsProvider logsdbIndexModeSettingsProvider, + Supplier<IndexVersion> createdIndexVersion ) { this.syntheticSourceLicenseService = syntheticSourceLicenseService; this.mapperServiceFactory = mapperServiceFactory; this.logsdbIndexModeSettingsProvider = logsdbIndexModeSettingsProvider; + this.createdIndexVersion = createdIndexVersion; } @Override @@ -77,8 +81,13 @@ public Settings getAdditionalIndexSettings( // This index name is used when validating component and index templates, we should skip this check in that case. // (See MetadataIndexTemplateService#validateIndexTemplateV2(...) method) boolean isTemplateValidation = "validate-index-name".equals(indexName); + boolean legacyLicensedUsageOfSyntheticSourceAllowed = isLegacyLicensedUsageOfSyntheticSourceAllowed( + templateIndexMode, + indexName, + dataStreamName + ); if (newIndexHasSyntheticSourceUsage(indexName, templateIndexMode, indexTemplateAndCreateRequestSettings, combinedTemplateMappings) - && syntheticSourceLicenseService.fallbackToStoredSource(isTemplateValidation)) { + && syntheticSourceLicenseService.fallbackToStoredSource(isTemplateValidation, legacyLicensedUsageOfSyntheticSourceAllowed)) { LOGGER.debug("creation of index [{}] with synthetic source without it being allowed", indexName); return Settings.builder() .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED.toString()) @@ -148,7 +157,7 @@ private IndexMetadata buildIndexMetadataForMapperService( ); int shardReplicas = indexTemplateAndCreateRequestSettings.getAsInt(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0); var finalResolvedSettings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(IndexMetadata.SETTING_VERSION_CREATED, createdIndexVersion.get()) .put(indexTemplateAndCreateRequestSettings) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, dummyShards) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, shardReplicas) @@ -163,4 +172,29 @@ private IndexMetadata buildIndexMetadataForMapperService( tmpIndexMetadata.settings(finalResolvedSettings); return tmpIndexMetadata.build(); } + + /** + * The GA-ed use cases in which synthetic source usage is allowed with gold or platinum license.
+ */ + boolean isLegacyLicensedUsageOfSyntheticSourceAllowed(IndexMode templateIndexMode, String indexName, String dataStreamName) { + if (templateIndexMode == IndexMode.TIME_SERIES) { + return true; + } + + // To allow the following patterns: profiling-metrics and profiling-events + if (dataStreamName != null && dataStreamName.startsWith("profiling-")) { + return true; + } + // To allow the following patterns: .profiling-sq-executables, .profiling-sq-leafframes and .profiling-stacktraces + if (indexName.startsWith(".profiling-")) { + return true; + } + // To allow the following patterns: metrics-apm.transaction.*, metrics-apm.service_transaction.*, metrics-apm.service_summary.*, + // metrics-apm.service_destination.*, metrics-apm.internal-* and metrics-apm.app.* + if (dataStreamName != null && dataStreamName.startsWith("metrics-apm.")) { + return true; + } + + return false; + } } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseService.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseService.java index 55d4bfe05abe3..26a672fb1c903 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseService.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseService.java @@ -7,18 +7,29 @@ package org.elasticsearch.xpack.logsdb; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; import org.elasticsearch.license.LicensedFeature; import org.elasticsearch.license.XPackLicenseState; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneOffset; + /** * Determines based on license and fallback setting whether synthetic source usages should fallback to stored source. */ final class SyntheticSourceLicenseService { - private static final String MAPPINGS_FEATURE_FAMILY = "mappings"; + static final String MAPPINGS_FEATURE_FAMILY = "mappings"; + // You can only override this property if you received explicit approval from Elastic. + static final String CUTOFF_DATE_SYS_PROP_NAME = "es.mapping.synthetic_source_fallback_to_stored_source.cutoff_date_restricted_override"; + private static final Logger LOGGER = LogManager.getLogger(SyntheticSourceLicenseService.class); + static final long DEFAULT_CUTOFF_DATE = LocalDateTime.of(2025, 2, 1, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); /** * A setting that determines whether source mode should always be stored source. Regardless of licence.
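To make the legacy allow-list concrete, here is how the isLegacyLicensedUsageOfSyntheticSourceAllowed method added above classifies a few sample inputs; the index and data stream names are illustrative only, and provider stands in for a SyntheticSourceIndexSettingsProvider instance:

    // Time series templates qualify regardless of index or data stream name.
    provider.isLegacyLicensedUsageOfSyntheticSourceAllowed(IndexMode.TIME_SERIES, "any-index", "any-ds");  // true
    // Profiling data streams and dot-prefixed profiling indices qualify.
    provider.isLegacyLicensedUsageOfSyntheticSourceAllowed(null, "some-index", "profiling-events");        // true
    provider.isLegacyLicensedUsageOfSyntheticSourceAllowed(null, ".profiling-stacktraces", null);          // true
    // APM metrics data streams qualify.
    provider.isLegacyLicensedUsageOfSyntheticSourceAllowed(null, "some-index", "metrics-apm.app.test");    // true
    // Anything else falls through to false.
    provider.isLegacyLicensedUsageOfSyntheticSourceAllowed(null, "regular-index", "logs-app-default");     // false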
@@ -30,31 +41,71 @@ final class SyntheticSourceLicenseService { Setting.Property.Dynamic ); - private static final LicensedFeature.Momentary SYNTHETIC_SOURCE_FEATURE = LicensedFeature.momentary( + static final LicensedFeature.Momentary SYNTHETIC_SOURCE_FEATURE = LicensedFeature.momentary( MAPPINGS_FEATURE_FAMILY, "synthetic-source", License.OperationMode.ENTERPRISE ); + static final LicensedFeature.Momentary SYNTHETIC_SOURCE_FEATURE_LEGACY = LicensedFeature.momentary( + MAPPINGS_FEATURE_FAMILY, + "synthetic-source-legacy", + License.OperationMode.GOLD + ); + + private final long cutoffDate; + private LicenseService licenseService; private XPackLicenseState licenseState; private volatile boolean syntheticSourceFallback; SyntheticSourceLicenseService(Settings settings) { - syntheticSourceFallback = FALLBACK_SETTING.get(settings); + this(settings, System.getProperty(CUTOFF_DATE_SYS_PROP_NAME)); + } + + SyntheticSourceLicenseService(Settings settings, String cutoffDate) { + this.syntheticSourceFallback = FALLBACK_SETTING.get(settings); + this.cutoffDate = getCutoffDate(cutoffDate); } /** * @return whether synthetic source mode should fallback to stored source. */ - public boolean fallbackToStoredSource(boolean isTemplateValidation) { + public boolean fallbackToStoredSource(boolean isTemplateValidation, boolean legacyLicensedUsageOfSyntheticSourceAllowed) { if (syntheticSourceFallback) { return true; } + var licenseStateSnapshot = licenseState.copyCurrentLicenseState(); + if (checkFeature(SYNTHETIC_SOURCE_FEATURE, licenseStateSnapshot, isTemplateValidation)) { + return false; + } + + var license = licenseService.getLicense(); + if (license == null) { + return true; + } + + boolean beforeCutoffDate = license.startDate() <= cutoffDate; + if (legacyLicensedUsageOfSyntheticSourceAllowed + && beforeCutoffDate + && checkFeature(SYNTHETIC_SOURCE_FEATURE_LEGACY, licenseStateSnapshot, isTemplateValidation)) { + // platinum license will allow synthetic source with gold legacy licensed feature too. 
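            // --- Illustrative sketch, not part of the patch: the cutoff check above is plain epoch-millis
            // arithmetic on the license start date. DEFAULT_CUTOFF_DATE is 2025-02-01T00:00 UTC, and the
            // two start dates below are the ones used by this patch's tests.
            long cutoff = LocalDateTime.of(2025, 2, 1, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli();
            long legacyStart = LocalDateTime.of(2024, 11, 12, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli();
            long lateStart = LocalDateTime.of(2025, 11, 12, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli();
            boolean legacyQualifies = legacyStart <= cutoff; // true: the legacy gold/platinum feature may still apply
            boolean lateQualifies = lateStart <= cutoff;     // false: index creation falls back to stored source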
+ LOGGER.debug("legacy license [{}] is allowed to use synthetic source", licenseStateSnapshot.getOperationMode().description()); + return false; + } + + return true; + } + + private static boolean checkFeature( + LicensedFeature.Momentary licensedFeature, + XPackLicenseState licenseStateSnapshot, + boolean isTemplateValidation + ) { if (isTemplateValidation) { - return SYNTHETIC_SOURCE_FEATURE.checkWithoutTracking(licenseState) == false; + return licensedFeature.checkWithoutTracking(licenseStateSnapshot); } else { - return SYNTHETIC_SOURCE_FEATURE.check(licenseState) == false; + return licensedFeature.check(licenseStateSnapshot); } } @@ -62,7 +113,26 @@ void setSyntheticSourceFallback(boolean syntheticSourceFallback) { this.syntheticSourceFallback = syntheticSourceFallback; } + void setLicenseService(LicenseService licenseService) { + this.licenseService = licenseService; + } + void setLicenseState(XPackLicenseState licenseState) { this.licenseState = licenseState; } + + private static long getCutoffDate(String cutoffDateAsString) { + if (cutoffDateAsString != null) { + long cutoffDate = LocalDateTime.parse(cutoffDateAsString).toInstant(ZoneOffset.UTC).toEpochMilli(); + LOGGER.warn("Configuring [{}] is only allowed with explicit approval from Elastic.", CUTOFF_DATE_SYS_PROP_NAME); + LOGGER.info( + "Configuring [{}] to [{}]", + CUTOFF_DATE_SYS_PROP_NAME, + LocalDateTime.ofInstant(Instant.ofEpochMilli(cutoffDate), ZoneOffset.UTC) + ); + return cutoffDate; + } else { + return DEFAULT_CUTOFF_DATE; + } + } } diff --git a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LegacyLicenceIntegrationTests.java b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LegacyLicenceIntegrationTests.java new file mode 100644 index 0000000000000..890bc464a2579 --- /dev/null +++ b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LegacyLicenceIntegrationTests.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + + package org.elasticsearch.xpack.logsdb; + + import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; + import org.elasticsearch.common.settings.Settings; + import org.elasticsearch.index.mapper.SourceFieldMapper; + import org.elasticsearch.license.AbstractLicensesIntegrationTestCase; + import org.elasticsearch.license.GetFeatureUsageRequest; + import org.elasticsearch.license.GetFeatureUsageResponse; + import org.elasticsearch.license.License; + import org.elasticsearch.license.LicenseService; + import org.elasticsearch.license.LicensedFeature; + import org.elasticsearch.license.TransportGetFeatureUsageAction; + import org.elasticsearch.license.XPackLicenseState; + import org.elasticsearch.plugins.Plugin; + import org.elasticsearch.test.ESIntegTestCase; + import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; + import org.hamcrest.Matcher; + import org.junit.Before; + + import java.nio.file.Path; + import java.time.LocalDateTime; + import java.time.ZoneOffset; + import java.util.Collection; + import java.util.List; + + import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; + import static org.elasticsearch.xpack.logsdb.SyntheticSourceLicenseServiceTests.createEnterpriseLicense; + import static org.elasticsearch.xpack.logsdb.SyntheticSourceLicenseServiceTests.createGoldOrPlatinumLicense; + import static org.hamcrest.Matchers.equalTo; + import static org.hamcrest.Matchers.not; + import static org.hamcrest.Matchers.nullValue; + + @ESIntegTestCase.ClusterScope(scope = TEST, numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) + public class LegacyLicenceIntegrationTests extends AbstractLicensesIntegrationTestCase { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return List.of(P.class); + } + + @Before + public void setup() throws Exception { + wipeAllLicenses(); + ensureGreen(); + License license = createGoldOrPlatinumLicense(); + putLicense(license); + ensureGreen(); + } + + public void testSyntheticSourceUsageDisallowed() { + createIndexWithSyntheticSourceAndAssertExpectedType("test", "STORED"); + + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE_LEGACY, nullValue()); + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE, nullValue()); + } + + public void testSyntheticSourceUsageWithLegacyLicense() { + createIndexWithSyntheticSourceAndAssertExpectedType(".profiling-stacktraces", "synthetic"); + + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE_LEGACY, not(nullValue())); + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE, nullValue()); + } + + public void testSyntheticSourceUsageWithLegacyLicensePastCutoff() throws Exception { + long startPastCutoff = LocalDateTime.of(2025, 11, 12, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + putLicense(createGoldOrPlatinumLicense(startPastCutoff)); + ensureGreen(); + + createIndexWithSyntheticSourceAndAssertExpectedType(".profiling-stacktraces", "STORED"); + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE_LEGACY, nullValue()); + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE, nullValue()); + } + + public void testSyntheticSourceUsageWithEnterpriseLicensePastCutoff() throws Exception { + long startPastCutoff = LocalDateTime.of(2025, 11, 12, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + putLicense(createEnterpriseLicense(startPastCutoff)); + ensureGreen(); + + createIndexWithSyntheticSourceAndAssertExpectedType(".profiling-traces", "synthetic"); + //
also supports non-exceptional indices + createIndexWithSyntheticSourceAndAssertExpectedType("test", "synthetic"); + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE_LEGACY, nullValue()); + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE, not(nullValue())); + } + + public void testSyntheticSourceUsageTracksBothLegacyAndRegularFeature() throws Exception { + createIndexWithSyntheticSourceAndAssertExpectedType(".profiling-traces", "synthetic"); + + putLicense(createEnterpriseLicense()); + ensureGreen(); + + createIndexWithSyntheticSourceAndAssertExpectedType(".profiling-traces-v2", "synthetic"); + + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE_LEGACY, not(nullValue())); + assertFeatureUsage(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE, not(nullValue())); + } + + private void createIndexWithSyntheticSourceAndAssertExpectedType(String indexName, String expectedType) { + var settings = Settings.builder().put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "synthetic").build(); + createIndex(indexName, settings); + var response = admin().indices().getSettings(new GetSettingsRequest().indices(indexName)).actionGet(); + assertThat( + response.getIndexToSettings().get(indexName).get(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey()), + equalTo(expectedType) + ); + } + + private List<GetFeatureUsageResponse.FeatureUsageInfo> getFeatureUsageInfo() { + return client().execute(TransportGetFeatureUsageAction.TYPE, new GetFeatureUsageRequest()).actionGet().getFeatures(); + } + + private void assertFeatureUsage(LicensedFeature.Momentary syntheticSourceFeature, Matcher<Object> matcher) { + GetFeatureUsageResponse.FeatureUsageInfo featureUsage = getFeatureUsageInfo().stream() + .filter(f -> f.getFamily().equals(SyntheticSourceLicenseService.MAPPINGS_FEATURE_FAMILY)) + .filter(f -> f.getName().equals(syntheticSourceFeature.getName())) + .findAny() + .orElse(null); + assertThat(featureUsage, matcher); + } + + public static class P extends LocalStateCompositeXPackPlugin { + + public P(final Settings settings, final Path configPath) { + super(settings, configPath); + plugins.add(new LogsDBPlugin(settings) { + @Override + protected XPackLicenseState getLicenseState() { + return P.this.getLicenseState(); + } + + @Override + protected LicenseService getLicenseService() { + return P.this.getLicenseService(); + } + }); + } + + } +} diff --git a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java new file mode 100644 index 0000000000000..eda0d87868745 --- /dev/null +++ b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java @@ -0,0 +1,129 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.logsdb; + +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.MapperTestUtils; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.license.internal.XPackLicenseStatus; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.time.LocalDateTime; +import java.time.ZoneOffset; +import java.util.List; + +import static org.elasticsearch.xpack.logsdb.SyntheticSourceIndexSettingsProviderTests.getLogsdbIndexModeSettingsProvider; +import static org.elasticsearch.xpack.logsdb.SyntheticSourceLicenseServiceTests.createGoldOrPlatinumLicense; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SyntheticSourceIndexSettingsProviderLegacyLicenseTests extends ESTestCase { + + private SyntheticSourceIndexSettingsProvider provider; + + @Before + public void setup() throws Exception { + long time = LocalDateTime.of(2024, 11, 12, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + License license = createGoldOrPlatinumLicense(); + var licenseState = new XPackLicenseState(() -> time, new XPackLicenseStatus(license.operationMode(), true, null)); + + var licenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + licenseService.setLicenseState(licenseState); + var mockLicenseService = mock(LicenseService.class); + when(mockLicenseService.getLicense()).thenReturn(license); + + SyntheticSourceLicenseService syntheticSourceLicenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + syntheticSourceLicenseService.setLicenseState(licenseState); + syntheticSourceLicenseService.setLicenseService(mockLicenseService); + + provider = new SyntheticSourceIndexSettingsProvider( + syntheticSourceLicenseService, + im -> MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), im.getSettings(), im.getIndex().getName()), + getLogsdbIndexModeSettingsProvider(false), + IndexVersion::current + ); + } + + public void testGetAdditionalIndexSettingsDefault() { + Settings settings = Settings.builder().put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "SYNTHETIC").build(); + String dataStreamName = "metrics-my-app"; + String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); + var result = provider.getAdditionalIndexSettings(indexName, dataStreamName, null, null, null, settings, List.of()); + assertThat(result.size(), equalTo(1)); + assertThat(result.get(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey()), equalTo("STORED")); + } + + public void testGetAdditionalIndexSettingsApm() throws IOException { + Settings settings = Settings.builder().put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "SYNTHETIC").build(); + String dataStreamName = "metrics-apm.app.test"; + String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); + var result = provider.getAdditionalIndexSettings(indexName, dataStreamName, null, null, null, settings, List.of()); + assertThat(result.size(), equalTo(0)); + } + + public void testGetAdditionalIndexSettingsProfiling() throws IOException { + Settings settings = 
Settings.builder().put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "SYNTHETIC").build(); + for (String dataStreamName : new String[] { "profiling-metrics", "profiling-events" }) { + String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); + var result = provider.getAdditionalIndexSettings(indexName, dataStreamName, null, null, null, settings, List.of()); + assertThat(result.size(), equalTo(0)); + } + + for (String indexName : new String[] { ".profiling-sq-executables", ".profiling-sq-leafframes", ".profiling-stacktraces" }) { + var result = provider.getAdditionalIndexSettings(indexName, null, null, null, null, settings, List.of()); + assertThat(result.size(), equalTo(0)); + } + } + + public void testGetAdditionalIndexSettingsTsdb() throws IOException { + Settings settings = Settings.builder().put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "SYNTHETIC").build(); + String dataStreamName = "metrics-my-app"; + String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); + var result = provider.getAdditionalIndexSettings(indexName, dataStreamName, IndexMode.TIME_SERIES, null, null, settings, List.of()); + assertThat(result.size(), equalTo(0)); + } + + public void testGetAdditionalIndexSettingsTsdbAfterCutoffDate() throws Exception { + long start = LocalDateTime.of(2025, 2, 2, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + License license = createGoldOrPlatinumLicense(start); + long time = LocalDateTime.of(2024, 12, 31, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + var licenseState = new XPackLicenseState(() -> time, new XPackLicenseStatus(license.operationMode(), true, null)); + + var licenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + licenseService.setLicenseState(licenseState); + var mockLicenseService = mock(LicenseService.class); + when(mockLicenseService.getLicense()).thenReturn(license); + + SyntheticSourceLicenseService syntheticSourceLicenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + syntheticSourceLicenseService.setLicenseState(licenseState); + syntheticSourceLicenseService.setLicenseService(mockLicenseService); + + provider = new SyntheticSourceIndexSettingsProvider( + syntheticSourceLicenseService, + im -> MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), im.getSettings(), im.getIndex().getName()), + getLogsdbIndexModeSettingsProvider(false), + IndexVersion::current + ); + + Settings settings = Settings.builder().put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "SYNTHETIC").build(); + String dataStreamName = "metrics-my-app"; + String indexName = DataStream.getDefaultBackingIndexName(dataStreamName, 0); + var result = provider.getAdditionalIndexSettings(indexName, dataStreamName, IndexMode.TIME_SERIES, null, null, settings, List.of()); + assertThat(result.size(), equalTo(1)); + assertThat(result.get(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey()), equalTo("STORED")); + } +} diff --git a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java index 2d8723a0d8c25..df1fb8f2d958c 100644 --- a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java +++ b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderTests.java @@ -15,8 +15,11 @@ import 
org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.MapperTestUtils; import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; import org.elasticsearch.license.MockLicenseState; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -27,6 +30,7 @@ import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.common.settings.Settings.builder; +import static org.elasticsearch.xpack.logsdb.SyntheticSourceLicenseServiceTests.createEnterpriseLicense; import static org.hamcrest.Matchers.equalTo; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -38,23 +42,27 @@ public class SyntheticSourceIndexSettingsProviderTests extends ESTestCase { private SyntheticSourceIndexSettingsProvider provider; private final AtomicInteger newMapperServiceCounter = new AtomicInteger(); - private static LogsdbIndexModeSettingsProvider getLogsdbIndexModeSettingsProvider(boolean enabled) { + static LogsdbIndexModeSettingsProvider getLogsdbIndexModeSettingsProvider(boolean enabled) { return new LogsdbIndexModeSettingsProvider(Settings.builder().put("cluster.logsdb.enabled", enabled).build()); } @Before - public void setup() { - MockLicenseState licenseState = mock(MockLicenseState.class); + public void setup() throws Exception { + MockLicenseState licenseState = MockLicenseState.createMock(); when(licenseState.isAllowed(any())).thenReturn(true); var licenseService = new SyntheticSourceLicenseService(Settings.EMPTY); licenseService.setLicenseState(licenseState); + var mockLicenseService = mock(LicenseService.class); + License license = createEnterpriseLicense(); + when(mockLicenseService.getLicense()).thenReturn(license); syntheticSourceLicenseService = new SyntheticSourceLicenseService(Settings.EMPTY); syntheticSourceLicenseService.setLicenseState(licenseState); + syntheticSourceLicenseService.setLicenseService(mockLicenseService); provider = new SyntheticSourceIndexSettingsProvider(syntheticSourceLicenseService, im -> { newMapperServiceCounter.incrementAndGet(); return MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), im.getSettings(), im.getIndex().getName()); - }, getLogsdbIndexModeSettingsProvider(false)); + }, getLogsdbIndexModeSettingsProvider(false), IndexVersion::current); newMapperServiceCounter.set(0); } @@ -80,10 +88,12 @@ public void testNewIndexHasSyntheticSourceUsage() throws IOException { boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of(new CompressedXContent(mapping))); assertTrue(result); assertThat(newMapperServiceCounter.get(), equalTo(1)); + assertWarnings(SourceFieldMapper.DEPRECATION_WARNING); } { String mapping; - if (randomBoolean()) { + boolean withSourceMode = randomBoolean(); + if (withSourceMode) { mapping = """ { "_doc": { @@ -114,6 +124,9 @@ public void testNewIndexHasSyntheticSourceUsage() throws IOException { boolean result = provider.newIndexHasSyntheticSourceUsage(indexName, null, settings, List.of(new CompressedXContent(mapping))); assertFalse(result); assertThat(newMapperServiceCounter.get(), equalTo(2)); + if (withSourceMode) { + assertWarnings(SourceFieldMapper.DEPRECATION_WARNING); + } } } @@ -336,7 +349,8 @@ public void testGetAdditionalIndexSettingsDowngradeFromSyntheticSourceFileMatch( provider = new 
SyntheticSourceIndexSettingsProvider( syntheticSourceLicenseService, im -> MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), im.getSettings(), im.getIndex().getName()), - getLogsdbIndexModeSettingsProvider(true) + getLogsdbIndexModeSettingsProvider(true), + IndexVersion::current ); final Settings settings = Settings.EMPTY; diff --git a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseServiceTests.java b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseServiceTests.java index 430ee75eb3561..90a13b16c028e 100644 --- a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseServiceTests.java +++ b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseServiceTests.java @@ -8,54 +8,195 @@ package org.elasticsearch.xpack.logsdb; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; import org.elasticsearch.license.MockLicenseState; +import org.elasticsearch.license.TestUtils; import org.elasticsearch.test.ESTestCase; +import org.junit.Before; import org.mockito.Mockito; +import java.time.LocalDateTime; +import java.time.ZoneOffset; +import java.util.UUID; + +import static org.elasticsearch.license.TestUtils.dateMath; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class SyntheticSourceLicenseServiceTests extends ESTestCase { + private LicenseService mockLicenseService; + private SyntheticSourceLicenseService licenseService; + + @Before + public void setup() throws Exception { + mockLicenseService = mock(LicenseService.class); + License license = createEnterpriseLicense(); + when(mockLicenseService.getLicense()).thenReturn(license); + licenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + } + public void testLicenseAllowsSyntheticSource() { - MockLicenseState licenseState = mock(MockLicenseState.class); - when(licenseState.isAllowed(any())).thenReturn(true); - var licenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + MockLicenseState licenseState = MockLicenseState.createMock(); + when(licenseState.isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE))).thenReturn(true); licenseService.setLicenseState(licenseState); - assertFalse("synthetic source is allowed, so not fallback to stored source", licenseService.fallbackToStoredSource(false)); + licenseService.setLicenseService(mockLicenseService); + assertFalse( + "synthetic source is allowed, so not fallback to stored source", + licenseService.fallbackToStoredSource(false, randomBoolean()) + ); Mockito.verify(licenseState, Mockito.times(1)).featureUsed(any()); } public void testLicenseAllowsSyntheticSourceTemplateValidation() { - MockLicenseState licenseState = mock(MockLicenseState.class); - when(licenseState.isAllowed(any())).thenReturn(true); - var licenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + MockLicenseState licenseState = MockLicenseState.createMock(); + when(licenseState.isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE))).thenReturn(true); licenseService.setLicenseState(licenseState); - assertFalse("synthetic source is allowed, so not fallback to stored source", licenseService.fallbackToStoredSource(true)); + licenseService.setLicenseService(mockLicenseService); + 
assertFalse( + "synthetic source is allowed, so not fallback to stored source", + licenseService.fallbackToStoredSource(true, randomBoolean()) + ); Mockito.verify(licenseState, Mockito.never()).featureUsed(any()); } public void testDefaultDisallow() { - MockLicenseState licenseState = mock(MockLicenseState.class); - when(licenseState.isAllowed(any())).thenReturn(false); - var licenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + MockLicenseState licenseState = MockLicenseState.createMock(); + when(licenseState.isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE))).thenReturn(false); licenseService.setLicenseState(licenseState); - assertTrue("synthetic source is not allowed, so fallback to stored source", licenseService.fallbackToStoredSource(false)); + licenseService.setLicenseService(mockLicenseService); + assertTrue( + "synthetic source is not allowed, so fallback to stored source", + licenseService.fallbackToStoredSource(false, randomBoolean()) + ); Mockito.verify(licenseState, Mockito.never()).featureUsed(any()); } public void testFallback() { - MockLicenseState licenseState = mock(MockLicenseState.class); - when(licenseState.isAllowed(any())).thenReturn(true); - var licenseService = new SyntheticSourceLicenseService(Settings.EMPTY); + MockLicenseState licenseState = MockLicenseState.createMock(); + when(licenseState.isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE))).thenReturn(true); licenseService.setLicenseState(licenseState); + licenseService.setLicenseService(mockLicenseService); licenseService.setSyntheticSourceFallback(true); assertTrue( "synthetic source is allowed, but fallback has been enabled, so fallback to stored source", - licenseService.fallbackToStoredSource(false) + licenseService.fallbackToStoredSource(false, randomBoolean()) ); Mockito.verifyNoInteractions(licenseState); + Mockito.verifyNoInteractions(mockLicenseService); + } + + public void testGoldOrPlatinumLicense() throws Exception { + mockLicenseService = mock(LicenseService.class); + License license = createGoldOrPlatinumLicense(); + when(mockLicenseService.getLicense()).thenReturn(license); + + MockLicenseState licenseState = MockLicenseState.createMock(); + when(licenseState.getOperationMode()).thenReturn(license.operationMode()); + when(licenseState.isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE_LEGACY))).thenReturn(true); + licenseService.setLicenseState(licenseState); + licenseService.setLicenseService(mockLicenseService); + assertFalse( + "legacy licensed usage is allowed, so not fallback to stored source", + licenseService.fallbackToStoredSource(false, true) + ); + Mockito.verify(licenseState, Mockito.times(1)).featureUsed(any()); } + public void testGoldOrPlatinumLicenseLegacyLicenseNotAllowed() throws Exception { + mockLicenseService = mock(LicenseService.class); + License license = createGoldOrPlatinumLicense(); + when(mockLicenseService.getLicense()).thenReturn(license); + + MockLicenseState licenseState = MockLicenseState.createMock(); + when(licenseState.getOperationMode()).thenReturn(license.operationMode()); + when(licenseState.isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE))).thenReturn(false); + licenseService.setLicenseState(licenseState); + licenseService.setLicenseService(mockLicenseService); + assertTrue( + "legacy licensed usage is not allowed, so fallback to stored source", + licenseService.fallbackToStoredSource(false, false) + ); + Mockito.verify(licenseState, 
Mockito.never()).featureUsed(any()); + Mockito.verify(licenseState, Mockito.times(1)).isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE)); + } + + public void testGoldOrPlatinumLicenseBeyondCutoffDate() throws Exception { + long start = LocalDateTime.of(2025, 1, 1, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + License license = createGoldOrPlatinumLicense(start); + mockLicenseService = mock(LicenseService.class); + when(mockLicenseService.getLicense()).thenReturn(license); + + MockLicenseState licenseState = MockLicenseState.createMock(); + when(licenseState.getOperationMode()).thenReturn(license.operationMode()); + when(licenseState.isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE))).thenReturn(false); + licenseService.setLicenseState(licenseState); + licenseService.setLicenseService(mockLicenseService); + assertTrue("beyond cutoff date, so fallback to stored source", licenseService.fallbackToStoredSource(false, true)); + Mockito.verify(licenseState, Mockito.never()).featureUsed(any()); + Mockito.verify(licenseState, Mockito.times(1)).isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE)); + } + + public void testGoldOrPlatinumLicenseCustomCutoffDate() throws Exception { + licenseService = new SyntheticSourceLicenseService(Settings.EMPTY, "2025-01-02T00:00"); + + long start = LocalDateTime.of(2025, 1, 1, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + License license = createGoldOrPlatinumLicense(start); + mockLicenseService = mock(LicenseService.class); + when(mockLicenseService.getLicense()).thenReturn(license); + + MockLicenseState licenseState = MockLicenseState.createMock(); + when(licenseState.getOperationMode()).thenReturn(license.operationMode()); + when(licenseState.isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE_LEGACY))).thenReturn(true); + licenseService.setLicenseState(licenseState); + licenseService.setLicenseService(mockLicenseService); + assertFalse("custom cutoff date, so fallback to stored source", licenseService.fallbackToStoredSource(false, true)); + Mockito.verify(licenseState, Mockito.times(1)).featureUsed(any()); + Mockito.verify(licenseState, Mockito.times(1)).isAllowed(same(SyntheticSourceLicenseService.SYNTHETIC_SOURCE_FEATURE_LEGACY)); + } + + static License createEnterpriseLicense() throws Exception { + long start = LocalDateTime.of(2024, 11, 12, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + return createEnterpriseLicense(start); + } + + static License createEnterpriseLicense(long start) throws Exception { + String uid = UUID.randomUUID().toString(); + long currentTime = System.currentTimeMillis(); + final License.Builder builder = License.builder() + .uid(uid) + .version(License.VERSION_CURRENT) + .expiryDate(dateMath("now+2d", currentTime)) + .startDate(start) + .issueDate(currentTime) + .type("enterprise") + .issuedTo("customer") + .issuer("elasticsearch") + .maxResourceUnits(10); + return TestUtils.generateSignedLicense(builder); + } + + static License createGoldOrPlatinumLicense() throws Exception { + long start = LocalDateTime.of(2024, 11, 12, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + return createGoldOrPlatinumLicense(start); + } + + static License createGoldOrPlatinumLicense(long start) throws Exception { + String uid = UUID.randomUUID().toString(); + long currentTime = System.currentTimeMillis(); + final License.Builder builder = License.builder() + .uid(uid) + .version(License.VERSION_CURRENT) + .expiryDate(dateMath("now+100d", currentTime)) + 
.startDate(start) + .issueDate(currentTime) + .type(randomBoolean() ? "gold" : "platinum") + .issuedTo("customer") + .issuer("elasticsearch") + .maxNodes(5); + return TestUtils.generateSignedLicense(builder); + } } diff --git a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/40_source_mode_setting.yml b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/40_source_mode_setting.yml index 33fedce3b59c1..792df4dbf639e 100644 --- a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/40_source_mode_setting.yml +++ b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/40_source_mode_setting.yml @@ -470,13 +470,7 @@ create an index with time_series index mode and synthetic source: indices.get_settings: index: "test_time_series_index_mode_synthetic" - match: { test_time_series_index_mode_synthetic.settings.index.mode: time_series } - - - - do: - indices.get_mapping: - index: test_time_series_index_mode_synthetic - - - match: { test_time_series_index_mode_synthetic.mappings._source.mode: synthetic } + - match: { test_time_series_index_mode_synthetic.settings.index.mapping.source.mode: synthetic } --- create an index with logsdb index mode and synthetic source: @@ -493,12 +487,7 @@ create an index with logsdb index mode and synthetic source: indices.get_settings: index: "test_logsdb_index_mode_synthetic" - match: { test_logsdb_index_mode_synthetic.settings.index.mode: logsdb } - - - do: - indices.get_mapping: - index: test_logsdb_index_mode_synthetic - - - match: { test_logsdb_index_mode_synthetic.mappings._source.mode: synthetic } + - match: { test_logsdb_index_mode_synthetic.settings.index.mapping.source.mode: synthetic } --- create an index with time_series index mode and stored source: @@ -524,12 +513,7 @@ create an index with time_series index mode and stored source: indices.get_settings: index: "test_time_series_index_mode_undefined" - match: { test_time_series_index_mode_undefined.settings.index.mode: time_series } - - - do: - indices.get_mapping: - index: test_time_series_index_mode_undefined - - - match: { test_time_series_index_mode_undefined.mappings._source.mode: stored } + - match: { test_time_series_index_mode_undefined.settings.index.mapping.source.mode: stored } --- create an index with logsdb index mode and stored source: @@ -546,12 +530,7 @@ create an index with logsdb index mode and stored source: indices.get_settings: index: "test_logsdb_index_mode_undefined" - match: { test_logsdb_index_mode_undefined.settings.index.mode: logsdb } - - - do: - indices.get_mapping: - index: test_logsdb_index_mode_undefined - - - match: { test_logsdb_index_mode_undefined.mappings._source.mode: stored } + - match: { test_logsdb_index_mode_undefined.settings.index.mapping.source.mode: stored } --- create an index with time_series index mode and disabled source: diff --git a/x-pack/plugin/mapper-aggregate-metric/build.gradle b/x-pack/plugin/mapper-aggregate-metric/build.gradle index bae5acc21fc75..2a7841929b21d 100644 --- a/x-pack/plugin/mapper-aggregate-metric/build.gradle +++ b/x-pack/plugin/mapper-aggregate-metric/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.internal.info.BuildParams - /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. 
Licensed under the Elastic License diff --git a/x-pack/plugin/mapper-constant-keyword/build.gradle b/x-pack/plugin/mapper-constant-keyword/build.gradle index ad9d3c2f86637..c1e0eb61b611b 100644 --- a/x-pack/plugin/mapper-constant-keyword/build.gradle +++ b/x-pack/plugin/mapper-constant-keyword/build.gradle @@ -1,7 +1,12 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ apply plugin: 'elasticsearch.internal-es-plugin' -apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' esplugin { name 'constant-keyword' @@ -18,7 +23,7 @@ dependencies { compileOnly project(path: xpackModule('core')) } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' } diff --git a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java index 216f82552353b..fa5d9428bb0c6 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java +++ b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.constantkeyword.mapper; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; @@ -58,7 +57,6 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; -import java.util.stream.Stream; /** * A {@link FieldMapper} that assigns every document the same value. @@ -356,40 +354,14 @@ protected SyntheticSourceSupport syntheticSourceSupport() { return new SyntheticSourceSupport.Native(SourceLoader.SyntheticFieldLoader.NOTHING); } - var loader = new SourceLoader.SyntheticFieldLoader() { - @Override - public Stream<Map.Entry<String, StoredFieldLoader>> storedFieldLoaders() { - return Stream.of(); - } - - @Override - public DocValuesLoader docValuesLoader(LeafReader reader, int[] docIdsInLeaf) { - return docId -> true; - } - - @Override - public boolean hasValue() { - return true; - } - - @Override - public void write(XContentBuilder b) throws IOException { - if (fieldType().value != null) { - b.field(leafName(), fieldType().value); - } - } - - @Override - public void reset() { - // NOOP - } - - @Override - public String fieldName() { - return fullPath(); - } - }; + /* + If there was no value in the document, synthetic source should not contain the value either. + This is consistent with stored source behavior and is important for scenarios + like reindexing into an index whose mapping configures a different value for this field. - return new SyntheticSourceSupport.Native(loader); + To achieve that we use the fallback implementation (_source only contains the value + if it was present in the original document)
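+ + For example (field name and value are hypothetical): if the mapping configures "value": "bar" but a document + was indexed without this field, its synthetic _source must stay empty of "bar", exactly as stored source would.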
+ */ + return new SyntheticSourceSupport.Fallback(); } } diff --git a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java index 4661fe77e8b11..2b9170afdfd70 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java +++ b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapperTests.java @@ -333,6 +333,17 @@ public void testNullValueSyntheticSource() throws IOException { assertThat(syntheticSource(mapper, b -> {}), equalTo("{}")); } + public void testNoValueInDocumentSyntheticSource() throws IOException { + DocumentMapper mapper = createSytheticSourceMapperService(mapping(b -> { + b.startObject("field"); + b.field("type", "constant_keyword"); + b.field("value", randomAlphaOfLength(5)); + b.endObject(); + })).documentMapper(); + + assertThat(syntheticSource(mapper, b -> {}), equalTo("{}")); + } + @Override protected boolean supportsEmptyInputArray() { return false; diff --git a/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/java/org/elasticsearch/xpack/constantkeyword/ConstantKeywordClientYamlTestSuiteIT.java b/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/java/org/elasticsearch/xpack/constantkeyword/ConstantKeywordClientYamlTestSuiteIT.java index 789059d9e11c0..5b6048b481abf 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/java/org/elasticsearch/xpack/constantkeyword/ConstantKeywordClientYamlTestSuiteIT.java +++ b/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/java/org/elasticsearch/xpack/constantkeyword/ConstantKeywordClientYamlTestSuiteIT.java @@ -10,8 +10,10 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; /** Runs yaml rest tests */ public class ConstantKeywordClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @@ -24,4 +26,12 @@ public ConstantKeywordClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidat public static Iterable parameters() throws Exception { return ESClientYamlSuiteTestCase.createParameters(); } + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local().module("constant-keyword").build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } } diff --git a/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml b/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml index d40f69f483dbb..012b1006b8d20 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml +++ b/x-pack/plugin/mapper-constant-keyword/src/yamlRestTest/resources/rest-api-spec/test/20_synthetic_source.yml @@ -1,7 +1,7 @@ constant_keyword: - requires: - cluster_features: [ "mapper.source.mode_from_index_setting" ] - reason: "Source mode configured through index setting" + cluster_features: [ 
"mapper.constant_keyword.synthetic_source_write_fix" ] + reason: "Behavior fix" - do: indices.create: @@ -26,6 +26,15 @@ constant_keyword: body: kwd: foo + - do: + index: + index: test + id: 2 + refresh: true + body: + kwd: foo + const_kwd: bar + - do: search: index: test @@ -33,6 +42,19 @@ constant_keyword: query: ids: values: [1] + + - match: + hits.hits.0._source: + kwd: foo + + - do: + search: + index: test + body: + query: + ids: + values: [2] + - match: hits.hits.0._source: kwd: foo diff --git a/x-pack/plugin/mapper-unsigned-long/build.gradle b/x-pack/plugin/mapper-unsigned-long/build.gradle index e011723da6230..17a4f8a03fa57 100644 --- a/x-pack/plugin/mapper-unsigned-long/build.gradle +++ b/x-pack/plugin/mapper-unsigned-long/build.gradle @@ -1,6 +1,3 @@ -import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.internal.info.BuildParams - /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License @@ -8,11 +5,13 @@ import org.elasticsearch.gradle.internal.info.BuildParams * 2.0. */ +import org.elasticsearch.gradle.Version + evaluationDependsOn(xpackModule('core')) apply plugin: 'elasticsearch.internal-es-plugin' -apply plugin: 'elasticsearch.legacy-yaml-rest-test' -apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { name 'unsigned-long' @@ -36,13 +35,3 @@ restResources { include '_common', 'bulk', 'indices', 'index', 'search', 'xpack' } } - -if (BuildParams.isSnapshotBuild() == false) { - tasks.named("test").configure { - systemProperty 'es.index_mode_feature_flag_registered', 'true' - } -} - -testClusters.configureEach { - requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") -} diff --git a/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongClientYamlTestSuiteIT.java b/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongClientYamlTestSuiteIT.java index 008bfb193387c..df2c5d81ca14a 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongClientYamlTestSuiteIT.java +++ b/x-pack/plugin/mapper-unsigned-long/src/yamlRestTest/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongClientYamlTestSuiteIT.java @@ -10,8 +10,10 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; /** Runs yaml rest tests */ public class UnsignedLongClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @@ -24,4 +26,12 @@ public UnsignedLongClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate t public static Iterable parameters() throws Exception { return ESClientYamlSuiteTestCase.createParameters(); } + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local().module("unsigned-long").build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } } diff --git a/x-pack/plugin/mapper-version/build.gradle b/x-pack/plugin/mapper-version/build.gradle index 69622762b9d5b..bf78c61523e39 100644 --- 
a/x-pack/plugin/mapper-version/build.gradle +++ b/x-pack/plugin/mapper-version/build.gradle @@ -1,11 +1,15 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ evaluationDependsOn(xpackModule('core')) - apply plugin: 'elasticsearch.internal-es-plugin' -apply plugin: 'elasticsearch.legacy-yaml-rest-test' -apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { @@ -25,8 +29,3 @@ dependencies { testImplementation project(path: xpackModule('analytics')) } -if (BuildParams.isSnapshotBuild() == false) { - tasks.named("test").configure { - systemProperty 'es.index_mode_feature_flag_registered', 'true' - } -} diff --git a/x-pack/plugin/mapper-version/src/yamlRestTest/java/org/elasticsearch/xpack/versionfield/VersionClientYamlTestSuiteIT.java b/x-pack/plugin/mapper-version/src/yamlRestTest/java/org/elasticsearch/xpack/versionfield/VersionClientYamlTestSuiteIT.java index bc9f32766a3bb..3474d5ce9be8c 100644 --- a/x-pack/plugin/mapper-version/src/yamlRestTest/java/org/elasticsearch/xpack/versionfield/VersionClientYamlTestSuiteIT.java +++ b/x-pack/plugin/mapper-version/src/yamlRestTest/java/org/elasticsearch/xpack/versionfield/VersionClientYamlTestSuiteIT.java @@ -10,8 +10,10 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; /** Runs yaml rest tests */ public class VersionClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @@ -24,4 +26,12 @@ public VersionClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCa public static Iterable parameters() throws Exception { return ESClientYamlSuiteTestCase.createParameters(); } + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local().module("mapper-version").build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } } diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/DownloadTaskRemovedListener.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/DownloadTaskRemovedListener.java new file mode 100644 index 0000000000000..929dac6ee357a --- /dev/null +++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/DownloadTaskRemovedListener.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.packageloader.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.tasks.RemovedTaskListener; +import org.elasticsearch.tasks.Task; + +public record DownloadTaskRemovedListener(ModelDownloadTask trackedTask, ActionListener listener) + implements + RemovedTaskListener { + + @Override + public void onRemoved(Task task) { + if (task.getId() == trackedTask.getId()) { + if (trackedTask.getTaskException() == null) { + listener.onResponse(AcknowledgedResponse.TRUE); + } else { + listener.onFailure(trackedTask.getTaskException()); + } + } + } +} diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelDownloadTask.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelDownloadTask.java index 59977bd418e11..dd09c3cf65fec 100644 --- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelDownloadTask.java +++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelDownloadTask.java @@ -13,6 +13,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.MlTasks; import java.io.IOException; import java.util.Map; @@ -51,9 +52,12 @@ public void writeTo(StreamOutput out) throws IOException { } private final AtomicReference downloadProgress = new AtomicReference<>(new DownLoadProgress(0, 0)); + private final String modelId; + private volatile Exception taskException; - public ModelDownloadTask(long id, String type, String action, String description, TaskId parentTaskId, Map headers) { - super(id, type, action, description, parentTaskId, headers); + public ModelDownloadTask(long id, String type, String action, String modelId, TaskId parentTaskId, Map headers) { + super(id, type, action, taskDescription(modelId), parentTaskId, headers); + this.modelId = modelId; } void setProgress(int totalParts, int downloadedParts) { @@ -65,4 +69,19 @@ public DownloadStatus getStatus() { return new DownloadStatus(downloadProgress.get()); } + public String getModelId() { + return modelId; + } + + public void setTaskException(Exception exception) { + this.taskException = exception; + } + + public Exception getTaskException() { + return taskException; + } + + public static String taskDescription(String modelId) { + return MlTasks.downloadModelTaskDescription(modelId); + } } diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java index 76b7781b1cffe..2a14a8761e357 100644 --- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java +++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java @@ -30,7 +30,6 @@ import org.elasticsearch.tasks.TaskAwareRequest; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import 
org.elasticsearch.xpack.core.common.notifications.Level; @@ -42,6 +41,9 @@ import java.io.IOException; import java.net.MalformedURLException; import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -49,7 +51,6 @@ import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ml.MlTasks.MODEL_IMPORT_TASK_ACTION; import static org.elasticsearch.xpack.core.ml.MlTasks.MODEL_IMPORT_TASK_TYPE; -import static org.elasticsearch.xpack.core.ml.MlTasks.downloadModelTaskDescription; public class TransportLoadTrainedModelPackage extends TransportMasterNodeAction { @@ -57,6 +58,7 @@ public class TransportLoadTrainedModelPackage extends TransportMasterNodeAction< private final Client client; private final CircuitBreakerService circuitBreakerService; + final Map> taskRemovedListenersByModelId; @Inject public TransportLoadTrainedModelPackage( @@ -81,6 +83,7 @@ public TransportLoadTrainedModelPackage( ); this.client = new OriginSettingClient(client, ML_ORIGIN); this.circuitBreakerService = circuitBreakerService; + taskRemovedListenersByModelId = new HashMap<>(); } @Override @@ -91,6 +94,12 @@ protected ClusterBlockException checkBlock(Request request, ClusterState state) @Override protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception { + if (handleDownloadInProgress(request.getModelId(), request.isWaitForCompletion(), listener)) { + logger.debug("Existing download of model [{}] in progress", request.getModelId()); + // download in progress, nothing to do + return; + } + ModelDownloadTask downloadTask = createDownloadTask(request); try { @@ -107,7 +116,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A var downloadCompleteListener = request.isWaitForCompletion() ? listener : ActionListener.noop(); - importModel(client, taskManager, request, modelImporter, downloadCompleteListener, downloadTask); + importModel(client, () -> unregisterTask(downloadTask), request, modelImporter, downloadTask, downloadCompleteListener); } catch (Exception e) { taskManager.unregister(downloadTask); listener.onFailure(e); @@ -124,22 +133,91 @@ private ParentTaskAssigningClient getParentTaskAssigningClient(Task originTask) return new ParentTaskAssigningClient(client, parentTaskId); } + /** + * Look for a current download task of the model and optionally wait + * for that task to complete if there is one. + * synchronized with {@code unregisterTask} to prevent the task being + * removed before the remove listener is added. 
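+ * In effect, concurrent download requests for the same model id are de-duplicated: only the first + * request creates a download task, and later requests either return immediately (when not waiting + * for completion) or attach a {@link DownloadTaskRemovedListener} to be notified once the existing + * task completes and is unregistered.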
+ * @param modelId Model being downloaded + * @param isWaitForCompletion Wait until the download completes before + * calling the listener + * @param listener Model download listener + * @return True if a download task is in progress + */ + synchronized boolean handleDownloadInProgress( + String modelId, + boolean isWaitForCompletion, + ActionListener<AcknowledgedResponse> listener + ) { + var description = ModelDownloadTask.taskDescription(modelId); + var tasks = taskManager.getCancellableTasks().values(); + + ModelDownloadTask inProgress = null; + for (var task : tasks) { + if (description.equals(task.getDescription()) && task instanceof ModelDownloadTask downloadTask) { + inProgress = downloadTask; + break; + } + } + + if (inProgress != null) { + if (isWaitForCompletion == false) { + // Not waiting for the download to complete, it is enough that the download is in progress + // Respond now, not when the download completes + listener.onResponse(AcknowledgedResponse.TRUE); + return true; + } + // Otherwise register a task removed listener which is called + // once the task is complete and unregistered + var tracker = new DownloadTaskRemovedListener(inProgress, listener); + taskRemovedListenersByModelId.computeIfAbsent(modelId, s -> new ArrayList<>()).add(tracker); + taskManager.registerRemovedTaskListener(tracker); + return true; + } + + return false; + } + + /** + * Unregister the completed task, triggering any removed task listeners. + * This method is synchronized to prevent the task being removed while + * {@code handleDownloadInProgress} is in progress. + * @param task The completed task + */ + synchronized void unregisterTask(ModelDownloadTask task) { + taskManager.unregister(task); // unregister will notify the removed task listeners + + var trackers = taskRemovedListenersByModelId.remove(task.getModelId()); + if (trackers != null) { + for (var tracker : trackers) { + taskManager.unregisterRemovedTaskListener(tracker); + } + } + } + /** * This is package scope so that we can test the logic directly. - * This should only be called from the masterOperation method and the tests + * This should only be called from the masterOperation method and the tests. + * This method is static for testing. * * @param auditClient a client which should only be used to send audit notifications. This client cannot be associated with the passed * in task, that way when the task is cancelled the notification requests can * still be performed. If it is associated with the task (i.e. via ParentTaskAssigningClient), * then the requests will throw a TaskCancelledException. + * @param unregisterTaskFn Runnable to unregister the task. Because this is a static method, + * a lambda is used rather than the instance method.
+ * @param request The download request + * @param modelImporter The importer + * @param task Download task + * @param listener Listener */ static void importModel( Client auditClient, - TaskManager taskManager, + Runnable unregisterTaskFn, Request request, ModelImporter modelImporter, - ActionListener listener, - Task task + ModelDownloadTask task, + ActionListener listener ) { final String modelId = request.getModelId(); final long relativeStartNanos = System.nanoTime(); @@ -155,9 +233,12 @@ static void importModel( Level.INFO ); listener.onResponse(AcknowledgedResponse.TRUE); - }, exception -> listener.onFailure(processException(auditClient, modelId, exception))); + }, exception -> { + task.setTaskException(exception); + listener.onFailure(processException(auditClient, modelId, exception)); + }); - modelImporter.doImport(ActionListener.runAfter(finishListener, () -> taskManager.unregister(task))); + modelImporter.doImport(ActionListener.runAfter(finishListener, unregisterTaskFn)); } static Exception processException(Client auditClient, String modelId, Exception e) { @@ -197,14 +278,7 @@ public TaskId getParentTask() { @Override public ModelDownloadTask createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new ModelDownloadTask( - id, - type, - action, - downloadModelTaskDescription(request.getModelId()), - parentTaskId, - headers - ); + return new ModelDownloadTask(id, type, action, request.getModelId(), parentTaskId, headers); } }, false); } diff --git a/x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackageTests.java b/x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackageTests.java index cbcfd5b760779..3486ce6af0db5 100644 --- a/x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackageTests.java +++ b/x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackageTests.java @@ -10,13 +10,19 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.common.notifications.Level; import org.elasticsearch.xpack.core.ml.action.AuditMlNotificationAction; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ModelPackageConfig; @@ -27,9 +33,13 @@ import java.io.IOException; import java.net.MalformedURLException; import java.net.URISyntaxException; +import java.util.Map; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.core.Strings.format; +import static 
org.elasticsearch.xpack.core.ml.MlTasks.MODEL_IMPORT_TASK_ACTION; +import static org.elasticsearch.xpack.core.ml.MlTasks.MODEL_IMPORT_TASK_TYPE; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.core.Is.is; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; @@ -37,6 +47,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class TransportLoadTrainedModelPackageTests extends ESTestCase { private static final String MODEL_IMPORT_FAILURE_MSG_FORMAT = "Model importing failed due to %s [%s]"; @@ -44,17 +55,10 @@ public class TransportLoadTrainedModelPackageTests extends ESTestCase { public void testSendsFinishedUploadNotification() { var uploader = createUploader(null); var taskManager = mock(TaskManager.class); - var task = mock(Task.class); + var task = mock(ModelDownloadTask.class); var client = mock(Client.class); - TransportLoadTrainedModelPackage.importModel( - client, - taskManager, - createRequestWithWaiting(), - uploader, - ActionListener.noop(), - task - ); + TransportLoadTrainedModelPackage.importModel(client, () -> {}, createRequestWithWaiting(), uploader, task, ActionListener.noop()); var notificationArg = ArgumentCaptor.forClass(AuditMlNotificationAction.Request.class); // 2 notifications- the start and finish messages @@ -108,32 +112,63 @@ public void testSendsWarningNotificationForTaskCancelledException() throws Excep public void testCallsOnResponseWithAcknowledgedResponse() throws Exception { var client = mock(Client.class); var taskManager = mock(TaskManager.class); - var task = mock(Task.class); + var task = mock(ModelDownloadTask.class); ModelImporter uploader = createUploader(null); var responseRef = new AtomicReference(); var listener = ActionListener.wrap(responseRef::set, e -> fail("received an exception: " + e.getMessage())); - TransportLoadTrainedModelPackage.importModel(client, taskManager, createRequestWithWaiting(), uploader, listener, task); + TransportLoadTrainedModelPackage.importModel(client, () -> {}, createRequestWithWaiting(), uploader, task, listener); assertThat(responseRef.get(), is(AcknowledgedResponse.TRUE)); } public void testDoesNotCallListenerWhenNotWaitingForCompletion() { var uploader = mock(ModelImporter.class); var client = mock(Client.class); - var taskManager = mock(TaskManager.class); - var task = mock(Task.class); - + var task = mock(ModelDownloadTask.class); TransportLoadTrainedModelPackage.importModel( client, - taskManager, + () -> {}, createRequestWithoutWaiting(), uploader, - ActionListener.running(ESTestCase::fail), - task + task, + ActionListener.running(ESTestCase::fail) ); } + public void testWaitForExistingDownload() { + var taskManager = mock(TaskManager.class); + var modelId = "foo"; + var task = new ModelDownloadTask(1L, MODEL_IMPORT_TASK_TYPE, MODEL_IMPORT_TASK_ACTION, modelId, new TaskId("node", 1L), Map.of()); + when(taskManager.getCancellableTasks()).thenReturn(Map.of(1L, task)); + + var transportService = mock(TransportService.class); + when(transportService.getTaskManager()).thenReturn(taskManager); + + var action = new TransportLoadTrainedModelPackage( + transportService, + mock(ClusterService.class), + mock(ThreadPool.class), + mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), + mock(Client.class), + mock(CircuitBreakerService.class) + ); + + assertTrue(action.handleDownloadInProgress(modelId, true, 
ActionListener.noop())); + verify(taskManager).registerRemovedTaskListener(any()); + assertThat(action.taskRemovedListenersByModelId.entrySet(), hasSize(1)); + assertThat(action.taskRemovedListenersByModelId.get(modelId), hasSize(1)); + + // With wait for completion == false no new removed listener will be added + assertTrue(action.handleDownloadInProgress(modelId, false, ActionListener.noop())); + verify(taskManager, times(1)).registerRemovedTaskListener(any()); + assertThat(action.taskRemovedListenersByModelId.entrySet(), hasSize(1)); + assertThat(action.taskRemovedListenersByModelId.get(modelId), hasSize(1)); + + assertFalse(action.handleDownloadInProgress("no-task-for-this-one", randomBoolean(), ActionListener.noop())); + } + private void assertUploadCallsOnFailure(Exception exception, String message, RestStatus status, Level level) throws Exception { var esStatusException = new ElasticsearchStatusException(message, status, exception); @@ -152,7 +187,7 @@ private void assertNotificationAndOnFailure( ) throws Exception { var client = mock(Client.class); var taskManager = mock(TaskManager.class); - var task = mock(Task.class); + var task = mock(ModelDownloadTask.class); ModelImporter uploader = createUploader(thrownException); var failureRef = new AtomicReference(); @@ -160,7 +195,14 @@ private void assertNotificationAndOnFailure( (AcknowledgedResponse response) -> { fail("received a acknowledged response: " + response.toString()); }, failureRef::set ); - TransportLoadTrainedModelPackage.importModel(client, taskManager, createRequestWithWaiting(), uploader, listener, task); + TransportLoadTrainedModelPackage.importModel( + client, + () -> taskManager.unregister(task), + createRequestWithWaiting(), + uploader, + task, + listener + ); var notificationArg = ArgumentCaptor.forClass(AuditMlNotificationAction.Request.class); // 2 notifications- the starting message and the failure diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle index e79a771293392..716c401a9fcc8 100644 --- a/x-pack/plugin/ml/build.gradle +++ b/x-pack/plugin/ml/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ apply plugin: 'elasticsearch.internal-es-plugin' apply plugin: 'elasticsearch.internal-cluster-test' @@ -94,7 +99,7 @@ dependencies { } def mlCppVersion(){ - return (project.gradle.parent != null && BuildParams.isSnapshotBuild() == false) ? + return (project.gradle.parent != null && buildParams.isSnapshotBuild() == false) ? (project.version + "-SNAPSHOT") : project.version; } diff --git a/x-pack/plugin/ml/qa/basic-multi-node/build.gradle b/x-pack/plugin/ml/qa/basic-multi-node/build.gradle index 64970d18b5c82..07dc1cc3c612a 100644 --- a/x-pack/plugin/ml/qa/basic-multi-node/build.gradle +++ b/x-pack/plugin/ml/qa/basic-multi-node/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ apply plugin: 'elasticsearch.legacy-java-rest-test' @@ -18,7 +23,7 @@ testClusters.configureEach { setting 'slm.history_index_enabled', 'false' } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // Test clusters run with security disabled tasks.named("javaRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/ml/qa/disabled/build.gradle b/x-pack/plugin/ml/qa/disabled/build.gradle index 232700d5f84aa..9d157b3e7fa32 100644 --- a/x-pack/plugin/ml/qa/disabled/build.gradle +++ b/x-pack/plugin/ml/qa/disabled/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ apply plugin: 'elasticsearch.legacy-java-rest-test' @@ -12,7 +17,7 @@ testClusters.configureEach { setting 'xpack.ml.enabled', 'false' } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // Test clusters run with security disabled tasks.named("javaRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle index bc22552d0d734..c0d6913d85590 100644 --- a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle @@ -1,8 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + import org.elasticsearch.gradle.internal.test.RestIntegTestTask import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE -import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.internal-testclusters' apply plugin: 'elasticsearch.standalone-rest-test' @@ -12,7 +18,7 @@ dependencies { testImplementation project(':x-pack:qa') } -Version ccsCompatVersion = BuildParams.bwcVersions.minimumWireCompatibleVersion +Version ccsCompatVersion = buildParams.bwcVersions.minimumWireCompatibleVersion restResources { restApi { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java index 447bca4f4e688..94fbde69e29c7 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.ml.integration; -import org.apache.lucene.util.Constants; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -222,8 +221,6 @@ public void testMemoryStatus() { } public void testOverflowToDisk() throws Exception { - assumeFalse("https://github.com/elastic/elasticsearch/issues/44609", Constants.WINDOWS); - Detector.Builder detector = new Detector.Builder("mean", "value"); detector.setByFieldName("clientIP"); diff --git a/x-pack/plugin/ml/qa/single-node-tests/build.gradle b/x-pack/plugin/ml/qa/single-node-tests/build.gradle index 6979ec4dcbd31..02421d9bb3d14 100644 --- a/x-pack/plugin/ml/qa/single-node-tests/build.gradle +++ b/x-pack/plugin/ml/qa/single-node-tests/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ apply plugin: 'elasticsearch.legacy-java-rest-test' @@ -12,7 +17,7 @@ testClusters.configureEach { setting 'xpack.license.self_generated.type', 'trial' } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // Test clusters run with security disabled tasks.named("javaRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java index 08d09f70cb46b..479fb20650b18 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java @@ -11,14 +11,12 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.ingest.SimulateDocumentBaseResult; import org.elasticsearch.action.ingest.SimulatePipelineAction; -import org.elasticsearch.action.ingest.SimulatePipelineRequest; import org.elasticsearch.action.ingest.SimulatePipelineResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; @@ -61,13 +59,13 @@ import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase; import org.junit.Before; -import java.nio.charset.StandardCharsets; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; +import static org.elasticsearch.ingest.IngestPipelineTestUtils.jsonSimulatePipelineRequest; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasItem; @@ -541,11 +539,7 @@ public void testMachineLearningCreateInferenceProcessorRestricted() { }}] }""", pipeline); PlainActionFuture simulatePipelineListener = new PlainActionFuture<>(); - client().execute( - SimulatePipelineAction.INSTANCE, - new SimulatePipelineRequest(new BytesArray(simulateSource.getBytes(StandardCharsets.UTF_8)), XContentType.JSON), - simulatePipelineListener - ); + client().execute(SimulatePipelineAction.INSTANCE, jsonSimulatePipelineRequest(simulateSource), simulatePipelineListener); assertThat(simulatePipelineListener.actionGet().getResults(), is(not(empty()))); @@ -575,7 +569,7 @@ public void testMachineLearningCreateInferenceProcessorRestricted() { // Simulating the pipeline should fail SimulateDocumentBaseResult simulateResponse = (SimulateDocumentBaseResult) client().execute( SimulatePipelineAction.INSTANCE, - new SimulatePipelineRequest(new BytesArray(simulateSource.getBytes(StandardCharsets.UTF_8)), XContentType.JSON) + jsonSimulatePipelineRequest(simulateSource) ).actionGet().getResults().get(0); assertThat(simulateResponse.getFailure(), is(not(nullValue()))); assertThat((simulateResponse.getFailure()).getCause(), is(instanceOf(ElasticsearchSecurityException.class))); @@ -588,11 +582,7 @@ public void testMachineLearningCreateInferenceProcessorRestricted() { putJsonPipeline("test_infer_license_pipeline", pipeline); PlainActionFuture simulatePipelineListenerNewLicense = new PlainActionFuture<>(); 
- client().execute( - SimulatePipelineAction.INSTANCE, - new SimulatePipelineRequest(new BytesArray(simulateSource.getBytes(StandardCharsets.UTF_8)), XContentType.JSON), - simulatePipelineListenerNewLicense - ); + client().execute(SimulatePipelineAction.INSTANCE, jsonSimulatePipelineRequest(simulateSource), simulatePipelineListenerNewLicense); assertThat(simulatePipelineListenerNewLicense.actionGet().getResults(), is(not(empty()))); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 1feb95661f33a..8363e0f5c19a1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -32,7 +32,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; @@ -69,7 +68,6 @@ import org.elasticsearch.plugins.ExtensiblePlugin; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.PersistentTaskPlugin; -import org.elasticsearch.plugins.Platforms; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.plugins.ShutdownAwarePlugin; @@ -931,15 +929,6 @@ public Collection createComponents(PluginServices services) { return List.of(new JobManagerHolder(), new MachineLearningExtensionHolder()); } - if ("darwin-x86_64".equals(Platforms.PLATFORM_NAME)) { - String msg = "The machine learning plugin will be permanently disabled on macOS x86_64 in new minor versions released " - + "from December 2024 onwards. To continue to use machine learning functionality on macOS please switch to an arm64 " - + "machine (Apple silicon). 
Alternatively, it will still be possible to run Elasticsearch with machine learning " - + "enabled in a Docker container on macOS x86_64."; - logger.warn(msg); - deprecationLogger.warn(DeprecationCategory.PLUGINS, "ml-darwin-x86_64", msg); - } - machineLearningExtension.get().configure(environment.settings()); this.mlUpgradeModeActionFilter.set(new MlUpgradeModeActionFilter(clusterService)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java index ba4483493da1d..e0405b1749536 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java @@ -45,7 +45,6 @@ import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.inference.InferenceWaitForAllocation; import org.elasticsearch.xpack.ml.inference.adaptiveallocations.AdaptiveAllocationsScalerService; -import org.elasticsearch.xpack.ml.inference.adaptiveallocations.ScaleFromZeroFeatureFlag; import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentService; import org.elasticsearch.xpack.ml.inference.loadingservice.LocalModel; import org.elasticsearch.xpack.ml.inference.loadingservice.ModelLoadingService; @@ -277,13 +276,11 @@ private void inferAgainstAllocatedModel( boolean starting = adaptiveAllocationsScalerService.maybeStartAllocation(assignment); if (starting) { message += "; starting deployment of one allocation"; - - if (ScaleFromZeroFeatureFlag.isEnabled()) { - waitForAllocation.waitForAssignment( - new InferenceWaitForAllocation.WaitingRequest(request, responseBuilder, parentTaskId, listener) - ); - return; - } + logger.debug(message); + waitForAllocation.waitForAssignment( + new InferenceWaitForAllocation.WaitingRequest(request, responseBuilder, parentTaskId, listener) + ); + return; } logger.debug(message); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java index 744d5dbd6974f..5912619e892ed 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java @@ -9,35 +9,27 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; -import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; import 
org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Predicates; -import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.persistent.PersistentTasksClusterService; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; import org.elasticsearch.persistent.PersistentTasksService; -import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.action.AbstractTransportSetUpgradeModeAction; +import org.elasticsearch.xpack.core.action.SetUpgradeModeActionRequest; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.IsolateDatafeedAction; @@ -48,7 +40,6 @@ import java.util.Comparator; import java.util.List; import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import static org.elasticsearch.ExceptionsHelper.rethrowAndSuppress; @@ -58,12 +49,11 @@ import static org.elasticsearch.xpack.core.ml.MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME; import static org.elasticsearch.xpack.core.ml.MlTasks.JOB_TASK_NAME; -public class TransportSetUpgradeModeAction extends AcknowledgedTransportMasterNodeAction { +public class TransportSetUpgradeModeAction extends AbstractTransportSetUpgradeModeAction { private static final Set ML_TASK_NAMES = Set.of(JOB_TASK_NAME, DATAFEED_TASK_NAME, DATA_FRAME_ANALYTICS_TASK_NAME); private static final Logger logger = LogManager.getLogger(TransportSetUpgradeModeAction.class); - private final AtomicBoolean isRunning = new AtomicBoolean(false); private final PersistentTasksClusterService persistentTasksClusterService; private final PersistentTasksService persistentTasksService; private final OriginSettingClient client; @@ -79,69 +69,38 @@ public TransportSetUpgradeModeAction( Client client, PersistentTasksService persistentTasksService ) { - super( - SetUpgradeModeAction.NAME, - transportService, - clusterService, - threadPool, - actionFilters, - SetUpgradeModeAction.Request::new, - indexNameExpressionResolver, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + super(SetUpgradeModeAction.NAME, "ml", transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver); this.persistentTasksClusterService = persistentTasksClusterService; this.client = new OriginSettingClient(client, ML_ORIGIN); this.persistentTasksService = persistentTasksService; } @Override - protected void masterOperation( - Task task, - SetUpgradeModeAction.Request request, - ClusterState state, - ActionListener listener - ) throws Exception { - - // Don't want folks spamming this endpoint while it is in progress, only allow one request to be handled at a time - if (isRunning.compareAndSet(false, true) == false) { - String msg = "Attempted to set [upgrade_mode] to [" - + request.isEnabled() - + "] from [" - + MlMetadata.getMlMetadata(state).isUpgradeMode() - + "] while previous request was processing."; - logger.info(msg); - Exception detail = new IllegalStateException(msg); - listener.onFailure( - new ElasticsearchStatusException( - "Cannot 
change [upgrade_mode]. Previous request is still being processed.", - RestStatus.TOO_MANY_REQUESTS, - detail - ) - ); - return; - } + protected String featureName() { + return "ml-set-upgrade-mode"; + } - // Noop, nothing for us to do, simply return fast to the caller - if (request.isEnabled() == MlMetadata.getMlMetadata(state).isUpgradeMode()) { - logger.info("Upgrade mode noop"); - isRunning.set(false); - listener.onResponse(AcknowledgedResponse.TRUE); - return; - } + @Override + protected boolean upgradeMode(ClusterState state) { + return MlMetadata.getMlMetadata(state).isUpgradeMode(); + } - logger.info( - "Starting to set [upgrade_mode] to [" + request.isEnabled() + "] from [" + MlMetadata.getMlMetadata(state).isUpgradeMode() + "]" - ); + @Override + protected ClusterState createUpdatedState(SetUpgradeModeActionRequest request, ClusterState currentState) { + logger.trace("Executing cluster state update"); + MlMetadata.Builder builder = new MlMetadata.Builder(currentState.metadata().custom(MlMetadata.TYPE)); + builder.isUpgradeMode(request.enabled()); + ClusterState.Builder newState = ClusterState.builder(currentState); + newState.metadata(Metadata.builder(currentState.getMetadata()).putCustom(MlMetadata.TYPE, builder.build()).build()); + return newState.build(); + } - ActionListener wrappedListener = ActionListener.wrap(r -> { - logger.info("Completed upgrade mode request"); - isRunning.set(false); - listener.onResponse(r); - }, e -> { - logger.info("Completed upgrade mode request but with failure", e); - isRunning.set(false); - listener.onFailure(e); - }); + protected void upgradeModeSuccessfullyChanged( + Task task, + SetUpgradeModeActionRequest request, + ClusterState state, + ActionListener wrappedListener + ) { final PersistentTasksCustomMetadata tasksCustomMetadata = state.metadata().custom(PersistentTasksCustomMetadata.TYPE); // <4> We have unassigned the tasks, respond to the listener. @@ -201,71 +160,29 @@ protected void masterOperation( */ - ActionListener clusterStateUpdateListener = ActionListener.wrap(acknowledgedResponse -> { - // State change was not acknowledged, we either timed out or ran into some exception - // We should not continue and alert failure to the end user - if (acknowledgedResponse.isAcknowledged() == false) { - logger.info("Cluster state update is NOT acknowledged"); - wrappedListener.onFailure(new ElasticsearchTimeoutException("Unknown error occurred while updating cluster state")); - return; - } - - // There are no tasks to worry about starting/stopping - if (tasksCustomMetadata == null || tasksCustomMetadata.tasks().isEmpty()) { - logger.info("No tasks to worry about after state update"); - wrappedListener.onResponse(AcknowledgedResponse.TRUE); - return; - } - - // Did we change from disabled -> enabled? 
- if (request.isEnabled()) { - logger.info("Enabling upgrade mode, must isolate datafeeds"); - isolateDatafeeds(tasksCustomMetadata, isolateDatafeedListener); - } else { - logger.info("Disabling upgrade mode, must wait for tasks to not have AWAITING_UPGRADE assignment"); - persistentTasksService.waitForPersistentTasksCondition( - // Wait for jobs, datafeeds and analytics not to be "Awaiting upgrade" - persistentTasksCustomMetadata -> persistentTasksCustomMetadata.tasks() - .stream() - .noneMatch(t -> ML_TASK_NAMES.contains(t.getTaskName()) && t.getAssignment().equals(AWAITING_UPGRADE)), - request.ackTimeout(), - ActionListener.wrap(r -> { - logger.info("Done waiting for tasks to be out of AWAITING_UPGRADE"); - wrappedListener.onResponse(AcknowledgedResponse.TRUE); - }, wrappedListener::onFailure) - ); - } - }, wrappedListener::onFailure); - - // <1> Change MlMetadata to indicate that upgrade_mode is now enabled - submitUnbatchedTask("ml-set-upgrade-mode", new AckedClusterStateUpdateTask(request, clusterStateUpdateListener) { - - @Override - protected AcknowledgedResponse newResponse(boolean acknowledged) { - logger.trace("Cluster update response built: " + acknowledged); - return AcknowledgedResponse.of(acknowledged); - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - logger.trace("Executing cluster state update"); - MlMetadata.Builder builder = new MlMetadata.Builder(currentState.metadata().custom(MlMetadata.TYPE)); - builder.isUpgradeMode(request.isEnabled()); - ClusterState.Builder newState = ClusterState.builder(currentState); - newState.metadata(Metadata.builder(currentState.getMetadata()).putCustom(MlMetadata.TYPE, builder.build()).build()); - return newState.build(); - } - }); - } - - @SuppressForbidden(reason = "legacy usage of unbatched task") // TODO add support for batching here - private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String source, ClusterStateUpdateTask task) { - clusterService.submitUnbatchedStateUpdateTask(source, task); - } + if (tasksCustomMetadata == null || tasksCustomMetadata.tasks().isEmpty()) { + logger.info("No tasks to worry about after state update"); + wrappedListener.onResponse(AcknowledgedResponse.TRUE); + return; + } - @Override - protected ClusterBlockException checkBlock(SetUpgradeModeAction.Request request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + if (request.enabled()) { + logger.info("Enabling upgrade mode, must isolate datafeeds"); + isolateDatafeeds(tasksCustomMetadata, isolateDatafeedListener); + } else { + logger.info("Disabling upgrade mode, must wait for tasks to not have AWAITING_UPGRADE assignment"); + persistentTasksService.waitForPersistentTasksCondition( + // Wait for jobs, datafeeds and analytics not to be "Awaiting upgrade" + persistentTasksCustomMetadata -> persistentTasksCustomMetadata.tasks() + .stream() + .noneMatch(t -> ML_TASK_NAMES.contains(t.getTaskName()) && t.getAssignment().equals(AWAITING_UPGRADE)), + request.ackTimeout(), + ActionListener.wrap(r -> { + logger.info("Done waiting for tasks to be out of AWAITING_UPGRADE"); + wrappedListener.onResponse(AcknowledgedResponse.TRUE); + }, wrappedListener::onFailure) + ); + } } /** diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java index 
5fd70ce71cd24..f01372ca4f246 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java @@ -190,11 +190,11 @@ protected void masterOperation( () -> "[" + request.getDeploymentId() + "] creating new assignment for model [" + request.getModelId() + "] failed", e ); - if (ExceptionsHelper.unwrapCause(e) instanceof ResourceAlreadyExistsException) { + if (ExceptionsHelper.unwrapCause(e) instanceof ResourceAlreadyExistsException resourceAlreadyExistsException) { e = new ElasticsearchStatusException( "Cannot start deployment [{}] because it has already been started", RestStatus.CONFLICT, - e, + resourceAlreadyExistsException, request.getDeploymentId() ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregator.java index e55736cf43607..e13b1e0033191 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregator.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.util.BytesRefHash; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Releasables; @@ -110,31 +111,33 @@ protected void doClose() { } @Override - public InternalAggregation[] buildAggregations(long[] ordsToCollect) throws IOException { - Bucket[][] topBucketsPerOrd = new Bucket[ordsToCollect.length][]; - for (int ordIdx = 0; ordIdx < ordsToCollect.length; ordIdx++) { - final long ord = ordsToCollect[ordIdx]; - final TokenListCategorizer categorizer = (ord < categorizers.size()) ? categorizers.get(ord) : null; - if (categorizer == null) { - topBucketsPerOrd[ordIdx] = new Bucket[0]; - continue; + public InternalAggregation[] buildAggregations(LongArray ordsToCollect) throws IOException { + try (ObjectArray topBucketsPerOrd = bigArrays().newObjectArray(ordsToCollect.size())) { + for (long ordIdx = 0; ordIdx < ordsToCollect.size(); ordIdx++) { + final long ord = ordsToCollect.get(ordIdx); + final TokenListCategorizer categorizer = (ord < categorizers.size()) ? 
categorizers.get(ord) : null; + if (categorizer == null) { + topBucketsPerOrd.set(ordIdx, new Bucket[0]); + continue; + } + int size = (int) Math.min(bucketOrds.bucketsInOrd(ordIdx), bucketCountThresholds.getShardSize()); + checkRealMemoryCBForInternalBucket(); + topBucketsPerOrd.set(ordIdx, categorizer.toOrderedBuckets(size)); } - int size = (int) Math.min(bucketOrds.bucketsInOrd(ordIdx), bucketCountThresholds.getShardSize()); - topBucketsPerOrd[ordIdx] = categorizer.toOrderedBuckets(size); - } - buildSubAggsForAllBuckets(topBucketsPerOrd, Bucket::getBucketOrd, Bucket::setAggregations); - InternalAggregation[] results = new InternalAggregation[ordsToCollect.length]; - for (int ordIdx = 0; ordIdx < ordsToCollect.length; ordIdx++) { - results[ordIdx] = new InternalCategorizationAggregation( - name, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), - similarityThreshold, - metadata(), - Arrays.asList(topBucketsPerOrd[ordIdx]) + buildSubAggsForAllBuckets(topBucketsPerOrd, Bucket::getBucketOrd, Bucket::setAggregations); + + return buildAggregations( + Math.toIntExact(ordsToCollect.size()), + ordIdx -> new InternalCategorizationAggregation( + name, + bucketCountThresholds.getRequiredSize(), + bucketCountThresholds.getMinDocCount(), + similarityThreshold, + metadata(), + Arrays.asList(topBucketsPerOrd.get(ordIdx)) + ) ); } - return results; } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregation.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregation.java index 7ef7a8f4e6dd5..efc041e2225a4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregation.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregation.java @@ -86,7 +86,7 @@ public int compareTo(BucketKey o) { } } - public static class Bucket extends InternalMultiBucketAggregation.InternalBucket + public static class Bucket extends InternalMultiBucketAggregation.InternalBucketWritable implements MultiBucketsAggregation.Bucket, Comparable<Bucket> { @@ -142,8 +142,7 @@ public void writeTo(StreamOutput out) throws IOException { aggregations.writeTo(out); } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + private void bucketToXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(CommonFields.DOC_COUNT.getPreferredName(), serializableCategory.getNumMatches()); builder.field(CommonFields.KEY.getPreferredName()); @@ -152,7 +151,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(CategoryDefinition.MAX_MATCHING_LENGTH.getPreferredName(), serializableCategory.maxMatchingStringLen()); aggregations.toXContentInternal(builder, params); builder.endObject(); - return builder; } BucketKey getRawKey() { @@ -280,7 +278,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.startArray(CommonFields.BUCKETS.getPreferredName()); for (Bucket bucket : buckets) { - bucket.toXContent(builder, params); + bucket.bucketToXContent(builder, params); } builder.endArray(); return builder; diff --git
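The two hunks above migrate buildAggregations from a plain long[] of owning bucket ordinals to a LongArray, with results staged in a BigArrays-allocated ObjectArray. The point of the pattern is that both structures are long-indexed and circuit-breaker accounted; only the final result array is still bounded by int, hence the explicit overflow guard. A standalone sketch of just that shape, where LongIndexed is my stand-in for the real org.elasticsearch.common.util.LongArray:

```java
import java.util.function.LongToIntFunction;

// Stand-in for a long-indexed, BigArrays-backed array (real type: LongArray).
interface LongIndexed {
    long size();
    long get(long index);
}

public class BuildAggregationsSketch {
    static int[] buildPerOrdinal(LongIndexed ordsToCollect, LongToIntFunction buildForOrd) {
        // Math.toIntExact fails fast instead of silently truncating if there are
        // ever more owning ordinals than an int can address.
        int[] results = new int[Math.toIntExact(ordsToCollect.size())];
        for (long ordIdx = 0; ordIdx < ordsToCollect.size(); ordIdx++) {
            results[(int) ordIdx] = buildForOrd.applyAsInt(ordsToCollect.get(ordIdx));
        }
        return results;
    }

    public static void main(String[] args) {
        long[] backing = { 0, 2, 5 };
        LongIndexed ords = new LongIndexed() {
            public long size() { return backing.length; }
            public long get(long i) { return backing[(int) i]; }
        };
        // Toy "aggregation": square each ordinal.
        for (int r : buildPerOrdinal(ords, ord -> (int) (ord * ord))) System.out.println(r);
    }
}
```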
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/TokenListCategorizer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/TokenListCategorizer.java index d0088edcb0805..7fef6cdafa372 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/TokenListCategorizer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/TokenListCategorizer.java @@ -19,6 +19,7 @@ import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.xpack.ml.aggs.categorization.TokenListCategory.TokenAndWeight; +import org.elasticsearch.xpack.ml.job.categorization.CategorizationAnalyzer; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -83,6 +84,8 @@ public void close() { @Nullable private final CategorizationPartOfSpeechDictionary partOfSpeechDictionary; + private final List<TokenListCategory> categoriesById; + /** * Categories stored in such a way that the most common are accessed first. * This is implemented as an {@link ArrayList} with bespoke ordering rather @@ -108,9 +111,20 @@ public TokenListCategorizer( this.lowerThreshold = threshold; this.upperThreshold = (1.0f + threshold) / 2.0f; this.categoriesByNumMatches = new ArrayList<>(); + this.categoriesById = new ArrayList<>(); cacheRamUsage(0); } + @Nullable + public TokenListCategory computeCategory(String s, CategorizationAnalyzer analyzer) { + try (TokenStream ts = analyzer.tokenStream("text", s)) { + return computeCategory(ts, s.length(), 1); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Nullable public TokenListCategory computeCategory(TokenStream ts, int unfilteredStringLen, long numDocs) throws IOException { assert partOfSpeechDictionary != null : "This version of computeCategory should only be used when a part-of-speech dictionary is available"; @@ -301,6 +315,7 @@ private synchronized TokenListCategory computeCategory( maxUnfilteredStringLen, numDocs ); + categoriesById.add(newCategory); categoriesByNumMatches.add(newCategory); cacheRamUsage(newCategory.ramBytesUsed()); return repositionCategory(newCategory, newIndex); @@ -412,6 +427,17 @@ static float similarity(List<TokenAndWeight> left, int leftWeight, List + public List<SerializableTokenListCategory> toCategories(int size) { + return categoriesByNumMatches.stream() + .limit(size) + .map(category -> new SerializableTokenListCategory(category, bytesRefHash)) + .toList(); + } + + public List<SerializableTokenListCategory> toCategoriesById() { + return categoriesById.stream().map(category -> new SerializableTokenListCategory(category, bytesRefHash)).toList(); + } + public InternalCategorizationAggregation.Bucket[] toOrderedBuckets(int size) { return categoriesByNumMatches.stream() .limit(size) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointBucket.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointBucket.java index c97166ac6fd80..aed0c40043cae 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointBucket.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointBucket.java @@ -12,12 +12,13 @@ import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; +import org.elasticsearch.xcontent.ToXContent; import
org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Objects; -public class ChangePointBucket extends InternalMultiBucketAggregation.InternalBucket { +public class ChangePointBucket extends InternalMultiBucketAggregation.InternalBucketWritable implements ToXContent { private final Object key; private final long docCount; private final InternalAggregations aggregations; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/DelegatingCircuitBreakerService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/DelegatingCircuitBreakerService.java index 350f45afb9e1f..1b28ebbb3eec6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/DelegatingCircuitBreakerService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/DelegatingCircuitBreakerService.java @@ -40,10 +40,12 @@ * At the time of writing circuit breakers are a global gauge.) * * After the map phase and before reduce, the {@link ItemSetMapReduceAggregator} creates instances of - * {@link InternalItemSetMapReduceAggregation}, see {@link ItemSetMapReduceAggregator#buildAggregations(long[])}. + * {@link InternalItemSetMapReduceAggregation}, see + * {@link ItemSetMapReduceAggregator#buildAggregations(org.elasticsearch.common.util.LongArray)}. * * (Note 1: Instead of keeping the existing instance, it would have been possible to deep-copy the object like - * {@link CardinalityAggregator#buildAggregations(long[])}. I decided against this approach mainly because the deep-copy isn't + * {@link CardinalityAggregator#buildAggregations(org.elasticsearch.common.util.LongArray)}. + * I decided against this approach mainly because the deep-copy isn't * secured by circuit breakers, meaning the node could run out of memory during the deep-copy.) 
* (Note 2: Between {@link ItemSetMapReduceAggregator#doClose()} and serializing {@link InternalItemSetMapReduceAggregation} * memory accounting is broken, meaning the agg context gets closed and bytes get returned to the circuit breaker before memory is diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/ItemSetMapReduceAggregator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/ItemSetMapReduceAggregator.java index 0f9555c77341f..1a5e5d7a0790e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/ItemSetMapReduceAggregator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/ItemSetMapReduceAggregator.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.LongObjectPagedHashMap; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.Tuple; @@ -117,9 +118,9 @@ public InternalAggregation buildEmptyAggregation() { } @Override - public final InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { - InternalAggregation[] results = new InternalAggregation[owningBucketOrds.length]; - for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { + public final InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { + InternalAggregation[] results = new InternalAggregation[Math.toIntExact(owningBucketOrds.size())]; + for (int ordIdx = 0; ordIdx < results.length; ordIdx++) { results[ordIdx] = buildAggregation(ordIdx); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java index 0ccdd1eb64601..2a6d6eb329503 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java @@ -27,13 +27,11 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.BulkByScrollTask; import org.elasticsearch.index.reindex.ReindexAction; import org.elasticsearch.index.reindex.ReindexRequest; import org.elasticsearch.script.Script; -import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.tasks.TaskId; @@ -147,7 +145,6 @@ protected void doExecute(ActionListener listener) { reindexRequest.setSourceQuery(config.getSource().getParsedQuery()); reindexRequest.getSearchRequest().allowPartialSearchResults(false); reindexRequest.getSearchRequest().source().fetchSource(config.getSource().getSourceFiltering()); - reindexRequest.getSearchRequest().source().sort(SeqNoFieldMapper.NAME, SortOrder.ASC); reindexRequest.setDestIndex(config.getDest().getIndex()); // We explicitly set slices to 1 as we cannot parallelize in order to have the incremental id diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/ScaleFromZeroFeatureFlag.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/ScaleFromZeroFeatureFlag.java deleted file mode 100644 index 4c446b65db9dd..0000000000000 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/ScaleFromZeroFeatureFlag.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.ml.inference.adaptiveallocations; - -import org.elasticsearch.common.util.FeatureFlag; - -public class ScaleFromZeroFeatureFlag { - private ScaleFromZeroFeatureFlag() {} - - private static final FeatureFlag FEATURE_FLAG = new FeatureFlag("ml_scale_from_zero"); - - public static boolean isEnabled() { - return FEATURE_FLAG.isEnabled(); - } -} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java index deb645ff96133..4a9d65481d412 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java @@ -981,7 +981,7 @@ private static Set countInferenceProcessors(IngestMetadata ingestMetadat return allReferencedModelKeys; } ingestMetadata.getPipelines().forEach((pipelineId, pipelineConfiguration) -> { - Object processors = pipelineConfiguration.getConfigAsMap().get("processors"); + Object processors = pipelineConfiguration.getConfig().get("processors"); if (processors instanceof List) { for (Object processor : (List) processors) { if (processor instanceof Map) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/NlpTokenizer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/NlpTokenizer.java index 0b4a5b651d8d4..930dbee304790 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/NlpTokenizer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/NlpTokenizer.java @@ -331,6 +331,29 @@ public List tokenize(String seq1, String seq2, Tokeni tokenIdsSeq2 = tokenIdsSeq2.subList(0, maxSequenceLength() - extraTokens - tokenIdsSeq1.size()); tokenPositionMapSeq2 = tokenPositionMapSeq2.subList(0, maxSequenceLength() - extraTokens - tokenIdsSeq1.size()); } + case BALANCED -> { + isTruncated = true; + int firstSequenceLength = 0; + + if (tokenIdsSeq2.size() > (maxSequenceLength() - getNumExtraTokensForSeqPair()) / 2) { + firstSequenceLength = min(tokenIdsSeq1.size(), (maxSequenceLength() - getNumExtraTokensForSeqPair()) / 2); + } else { + firstSequenceLength = min( + tokenIdsSeq1.size(), + maxSequenceLength() - tokenIdsSeq2.size() - getNumExtraTokensForSeqPair() + ); + } + int secondSequenceLength = min( + tokenIdsSeq2.size(), + maxSequenceLength() - firstSequenceLength - getNumExtraTokensForSeqPair() + ); + + tokenIdsSeq1 = tokenIdsSeq1.subList(0, firstSequenceLength); + tokenPositionMapSeq1 = tokenPositionMapSeq1.subList(0, firstSequenceLength); + + tokenIdsSeq2 = 
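The ModelLoadingService hunk above switches from getConfigAsMap() to getConfig(), but either way the pipeline body arrives as an untyped map and has to be walked with instanceof checks. A simplified, self-contained sketch of that kind of traversal; the "inference"/"model_id" key shape is my assumption for illustration, not a copy of the real countInferenceProcessors logic:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class PipelineConfigWalk {
    // Walk an untyped pipeline config and collect model ids referenced by
    // hypothetical inference processors (simplified shape, assumed here).
    static List<String> referencedModelIds(Map<String, Object> pipelineConfig) {
        List<String> modelIds = new ArrayList<>();
        Object processors = pipelineConfig.get("processors");
        if (processors instanceof List<?> processorList) {
            for (Object processor : processorList) {
                if (processor instanceof Map<?, ?> processorMap && processorMap.get("inference") instanceof Map<?, ?> inference) {
                    if (inference.get("model_id") instanceof String s) {
                        modelIds.add(s);
                    }
                }
            }
        }
        return modelIds;
    }

    public static void main(String[] args) {
        var config = Map.<String, Object>of("processors", List.of(Map.of("inference", Map.of("model_id", "my-model"))));
        System.out.println(referencedModelIds(config)); // [my-model]
    }
}
```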
tokenIdsSeq2.subList(0, secondSequenceLength); + tokenPositionMapSeq2 = tokenPositionMapSeq2.subList(0, secondSequenceLength); + } case NONE -> throw ExceptionsHelper.badRequestException( "Input too large. The tokenized input length [{}] exceeds the maximum sequence length [{}]", numTokens, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizer.java index bbe5bea691c35..5dd7dbbffaa61 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizer.java @@ -194,7 +194,7 @@ Reader normalize(CharSequence str) { if (charDelta < 0) { // normalised form is shorter int lastDiff = getLastCumulativeDiff(); - addOffCorrectMap(normalizedCharPos, lastDiff + charDelta); + addOffCorrectMap(normalizedCharPos, lastDiff - charDelta); } else if (charDelta > 0) { // inserted chars, add the offset in the output stream int lastDiff = getLastCumulativeDiff(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/UnigramTokenizer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/UnigramTokenizer.java index 31deac066cba2..01821f5582471 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/UnigramTokenizer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/UnigramTokenizer.java @@ -367,8 +367,10 @@ List tokenize(CharSequence inputSequence, IntToIntFuncti new DelimitedToken.Encoded( Strings.format("<0x%02X>", bytes[i]), pieces[i], + // even though we are changing the number of characters in the output, we don't + // need to change the offsets. 
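The BALANCED truncation case added to NlpTokenizer above splits the token budget between the two sequences: if the second sequence alone wants more than half of (maxSequenceLength - extra tokens), the first is capped at half; otherwise the first may take everything the second leaves over. A self-contained sketch of just that arithmetic; the method and record names are mine, not the tokenizer's:

```java
public class BalancedTruncation {
    record Lengths(int first, int second) {}

    // Mirrors the BALANCED case: budget = maxSequenceLength - extraTokens.
    static Lengths balancedLengths(int seq1Tokens, int seq2Tokens, int maxSequenceLength, int extraTokens) {
        int budget = maxSequenceLength - extraTokens;
        int first;
        if (seq2Tokens > budget / 2) {
            // Second sequence wants more than half: cap the first at half the budget.
            first = Math.min(seq1Tokens, budget / 2);
        } else {
            // Second sequence fits: the first may take all the leftover budget.
            first = Math.min(seq1Tokens, budget - seq2Tokens);
        }
        int second = Math.min(seq2Tokens, budget - first);
        return new Lengths(first, second);
    }

    public static void main(String[] args) {
        // e.g. a 512-token model with 3 special tokens: a long pair splits near-evenly...
        System.out.println(balancedLengths(1000, 1000, 512, 3)); // Lengths[first=254, second=255]
        // ...while a short second sequence leaves the remainder to the first.
        System.out.println(balancedLengths(1000, 100, 512, 3));  // Lengths[first=409, second=100]
    }
}
```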
The offsets refer to the input characters offsetCorrection.apply(node.startsAtCharPos), - offsetCorrection.apply(startsAtBytes + i) + offsetCorrection.apply(endsAtChars) ) ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPostDataAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPostDataAction.java index 48c6abde3010a..0fcad773100ff 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPostDataAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPostDataAction.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.ml.rest.job; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.rest.BaseRestHandler; @@ -51,9 +52,14 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient PostDataAction.Request request = new PostDataAction.Request(restRequest.param(Job.ID.getPreferredName())); request.setResetStart(restRequest.param(PostDataAction.Request.RESET_START.getPreferredName(), DEFAULT_RESET_START)); request.setResetEnd(restRequest.param(PostDataAction.Request.RESET_END.getPreferredName(), DEFAULT_RESET_END)); - request.setContent(restRequest.content(), restRequest.getXContentType()); + var content = restRequest.content(); + request.setContent(content, restRequest.getXContentType()); - return channel -> client.execute(PostDataAction.INSTANCE, request, new RestToXContentListener<>(channel, r -> RestStatus.ACCEPTED)); + return channel -> client.execute( + PostDataAction.INSTANCE, + request, + ActionListener.withRef(new RestToXContentListener<>(channel, r -> RestStatus.ACCEPTED), content) + ); } @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java index 561076c302eda..1604c47ac4754 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java @@ -36,7 +36,7 @@ public final class AggregationTestUtils { private AggregationTestUtils() {} static InternalHistogram.Bucket createHistogramBucket(long timestamp, long docCount, List subAggregations) { - return new InternalHistogram.Bucket(timestamp, docCount, false, DocValueFormat.RAW, createAggs(subAggregations)); + return new InternalHistogram.Bucket(timestamp, docCount, DocValueFormat.RAW, createAggs(subAggregations)); } static InternalComposite.InternalBucket createCompositeBucket( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManagerTests.java index 56cdcc88df91b..15fb2b2b81f30 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManagerTests.java @@ -138,16 +138,20 @@ public void testRunJob_TaskIsStopping() { when(task.isStopping()).thenReturn(true); when(task.getParams()).thenReturn(new StartDataFrameAnalyticsAction.TaskParams("data_frame_id", 
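The RestPostDataAction hunk above wraps the listener with ActionListener.withRef(listener, content) so the ref-counted request body stays retained until the asynchronous action completes. A minimal sketch of that lifecycle idea under that assumption; Releasable below is my stand-in for Elasticsearch's RefCounted, and the real withRef also covers the failure callback, which the single Consumer here only models via the finally block:

```java
import java.util.function.Consumer;

// Stand-in for a ref-counted resource such as a pooled request-body buffer.
interface Releasable {
    void release();
}

public class WithRefSketch {
    // Wrap a completion callback so the resource is released exactly once,
    // after the callback has run (analogous in spirit to ActionListener.withRef).
    static <T> Consumer<T> withRef(Consumer<T> delegate, Releasable ref) {
        return response -> {
            try {
                delegate.accept(response);
            } finally {
                ref.release();
            }
        };
    }

    public static void main(String[] args) {
        Releasable pooledBody = () -> System.out.println("buffer returned to pool");
        Consumer<String> listener = withRef(r -> System.out.println("handled: " + r), pooledBody);
        listener.accept("ACCEPTED"); // release happens only after handling
    }
}
```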
MlConfigVersion.CURRENT, false)); - processManager.runJob(task, dataFrameAnalyticsConfig, dataExtractorFactory, + processManager.runJob( + task, + dataFrameAnalyticsConfig, + dataExtractorFactory, ActionTestUtils.assertNoFailureListener(stepResponse -> { - assertThat(processManager.getProcessContextCount(), equalTo(0)); - assertThat(stepResponse.isTaskComplete(), is(true)); + assertThat(processManager.getProcessContextCount(), equalTo(0)); + assertThat(stepResponse.isTaskComplete(), is(true)); - InOrder inOrder = inOrder(task); - inOrder.verify(task).isStopping(); - inOrder.verify(task).getParams(); - verifyNoMoreInteractions(task); - })); + InOrder inOrder = inOrder(task); + inOrder.verify(task).isStopping(); + inOrder.verify(task).getParams(); + verifyNoMoreInteractions(task); + }) + ); } public void testRunJob_ProcessContextAlreadyExists() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessorTests.java index 3590793b81abd..7460e17055a00 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessorTests.java @@ -10,11 +10,13 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.inference.results.TextSimilarityInferenceResults; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.BertTokenization; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.DebertaV2Tokenization; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextSimilarityConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.Tokenization; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.VocabularyConfig; import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.BertTokenizationResult; import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.BertTokenizer; +import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.DebertaV2Tokenizer; import org.elasticsearch.xpack.ml.inference.nlp.tokenizers.TokenizationResult; import org.elasticsearch.xpack.ml.inference.pytorch.results.PyTorchInferenceResult; @@ -22,6 +24,8 @@ import java.util.List; import static org.elasticsearch.xpack.ml.inference.nlp.tokenizers.BertTokenizerTests.TEST_CASED_VOCAB; +import static org.elasticsearch.xpack.ml.inference.nlp.tokenizers.DebertaV2TokenizerTests.TEST_CASE_SCORES; +import static org.elasticsearch.xpack.ml.inference.nlp.tokenizers.DebertaV2TokenizerTests.TEST_CASE_VOCAB; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -62,6 +66,33 @@ public void testProcessor() throws IOException { assertThat(result.predictedValue(), closeTo(42, 1e-6)); } + public void testBalancedTruncationWithLongInput() throws IOException { + String question = "Is Elasticsearch scalable?"; + StringBuilder longInputBuilder = new StringBuilder(); + for (int i = 0; i < 1000; i++) { + longInputBuilder.append(TEST_CASE_VOCAB.get(randomIntBetween(0, TEST_CASE_VOCAB.size() - 1))).append(i).append(" "); + } + String longInput = longInputBuilder.toString().trim(); + + DebertaV2Tokenization tokenization = new DebertaV2Tokenization(false, true, null, Tokenization.Truncate.BALANCED, -1); + DebertaV2Tokenizer tokenizer = DebertaV2Tokenizer.builder(TEST_CASE_VOCAB, TEST_CASE_SCORES, 
tokenization).build(); + TextSimilarityConfig textSimilarityConfig = new TextSimilarityConfig( + question, + new VocabularyConfig(""), + tokenization, + "result", + TextSimilarityConfig.SpanScoreFunction.MAX + ); + TextSimilarityProcessor processor = new TextSimilarityProcessor(tokenizer); + TokenizationResult tokenizationResult = processor.getRequestBuilder(textSimilarityConfig) + .buildRequest(List.of(longInput), "1", Tokenization.Truncate.BALANCED, -1, null) + .tokenization(); + + // Assert that the tokenization result is as expected + assertThat(tokenizationResult.anyTruncated(), is(true)); + assertThat(tokenizationResult.getTokenization(0).tokenIds().length, equalTo(512)); + } + public void testResultFunctions() { BertTokenization tokenization = new BertTokenization(false, true, 384, Tokenization.Truncate.NONE, 128); BertTokenizer tokenizer = BertTokenizer.builder(TEST_CASED_VOCAB, tokenization).build(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/DebertaV2TokenizerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/DebertaV2TokenizerTests.java index bbe509da67452..fc070ec25dc68 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/DebertaV2TokenizerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/DebertaV2TokenizerTests.java @@ -23,7 +23,7 @@ public class DebertaV2TokenizerTests extends ESTestCase { - private static final List<String> TEST_CASE_VOCAB = List.of( + public static final List<String> TEST_CASE_VOCAB = List.of( DebertaV2Tokenizer.CLASS_TOKEN, DebertaV2Tokenizer.PAD_TOKEN, DebertaV2Tokenizer.SEPARATOR_TOKEN, @@ -48,7 +48,7 @@ public class DebertaV2TokenizerTests extends ESTestCase { "<0xAD>", "▁" ); - private static final List<Double> TEST_CASE_SCORES = List.of( + public static final List<Double> TEST_CASE_SCORES = List.of( 0.0, 0.0, 0.0, @@ -94,6 +94,20 @@ public void testTokenize() throws IOException { } } + public void testTokenizeWithHiddenControlCharacters() throws IOException { + try ( + DebertaV2Tokenizer tokenizer = DebertaV2Tokenizer.builder( + TEST_CASE_VOCAB, + TEST_CASE_SCORES, + new DebertaV2Tokenization(false, false, null, Tokenization.Truncate.NONE, -1) + ).build() + ) { + TokenizationResult.Tokens tokenization = tokenizer.tokenize("\u009F\u008Fz", Tokenization.Truncate.NONE, -1, 0, null).get(0); + assertThat(tokenStrings(tokenization.tokens().get(0)), contains("▁", "z")); + + } + } + public void testSurrogatePair() throws IOException { try ( DebertaV2Tokenizer tokenizer = DebertaV2Tokenizer.builder( diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java index b69b958a27ce6..762cbffacb082 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.monitoring.rest.action; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; @@ -93,8 +94,9 @@ public RestChannelConsumer
prepareRequest(RestRequest request, NodeClient client final long intervalMillis = parseTimeValue(intervalAsString, INTERVAL).getMillis(); final MonitoringBulkRequestBuilder requestBuilder = new MonitoringBulkRequestBuilder(client); - requestBuilder.add(system, request.content(), request.getXContentType(), timestamp, intervalMillis); - return channel -> requestBuilder.execute(getRestBuilderListener(channel)); + var content = request.content(); + requestBuilder.add(system, content, request.getXContentType(), timestamp, intervalMillis); + return channel -> requestBuilder.execute(ActionListener.withRef(getRestBuilderListener(channel), content)); } @Override diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index 9458442557694..f4d50df4ff613 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -572,7 +572,8 @@ public void testToXContent() throws IOException { "total_deduplicated_field_count": 0, "total_deduplicated_mapping_size_in_bytes": 0, "field_types": [], - "runtime_field_types": [] + "runtime_field_types": [], + "source_modes": {} }, "analysis": { "char_filter_types": [], diff --git a/x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/OTelIndexTemplateRegistry.java b/x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/OTelIndexTemplateRegistry.java index 435530542c857..ca52db9331cf3 100644 --- a/x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/OTelIndexTemplateRegistry.java +++ b/x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/OTelIndexTemplateRegistry.java @@ -10,7 +10,6 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.ClientHelper; @@ -27,10 +26,9 @@ public OTelIndexTemplateRegistry( ClusterService clusterService, ThreadPool threadPool, Client client, - NamedXContentRegistry xContentRegistry, - FeatureService featureService + NamedXContentRegistry xContentRegistry ) { - super(nodeSettings, clusterService, threadPool, client, xContentRegistry, featureService); + super(nodeSettings, clusterService, threadPool, client, xContentRegistry); } @Override diff --git a/x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/OTelPlugin.java b/x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/OTelPlugin.java index 543102330bd08..67bd8c4e002d3 100644 --- a/x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/OTelPlugin.java +++ b/x-pack/plugin/otel-data/src/main/java/org/elasticsearch/xpack/oteldata/OTelPlugin.java @@ -48,14 +48,7 @@ public Collection createComponents(PluginServices services) { Settings settings = services.environment().settings(); ClusterService clusterService = services.clusterService(); registry.set( - new OTelIndexTemplateRegistry( - settings, - clusterService, - services.threadPool(), - services.client(), - 
services.xContentRegistry(), - services.featureService() - ) + new OTelIndexTemplateRegistry(settings, clusterService, services.threadPool(), services.client(), services.xContentRegistry()) ); if (enabled) { OTelIndexTemplateRegistry registryInstance = registry.get(); diff --git a/x-pack/plugin/otel-data/src/main/resources/component-templates/traces-otel@mappings.yaml b/x-pack/plugin/otel-data/src/main/resources/component-templates/traces-otel@mappings.yaml index 2b0d1ec536fa6..3a1ba435b8f1f 100644 --- a/x-pack/plugin/otel-data/src/main/resources/component-templates/traces-otel@mappings.yaml +++ b/x-pack/plugin/otel-data/src/main/resources/component-templates/traces-otel@mappings.yaml @@ -10,8 +10,6 @@ template: sort: field: [ "resource.attributes.host.name", "@timestamp" ] mappings: - _source: - mode: synthetic properties: trace_id: type: keyword diff --git a/x-pack/plugin/otel-data/src/main/resources/resources.yaml b/x-pack/plugin/otel-data/src/main/resources/resources.yaml index b2d30c7f85cc4..9edbe5622b3f1 100644 --- a/x-pack/plugin/otel-data/src/main/resources/resources.yaml +++ b/x-pack/plugin/otel-data/src/main/resources/resources.yaml @@ -1,7 +1,7 @@ # "version" holds the version of the templates and ingest pipelines installed # by xpack-plugin otel-data. This must be increased whenever an existing template is # changed, in order for it to be updated on Elasticsearch upgrade. -version: 6 +version: 7 component-templates: - otel@mappings diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java index 7d8a474453c4c..71e8dcbff4ee6 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java @@ -54,7 +54,7 @@ public class ProfilingIndexTemplateRegistry extends IndexTemplateRegistry { // version 11: Added 'profiling.agent.protocol' keyword mapping to profiling-hosts // version 12: Added 'profiling.agent.env_https_proxy' keyword mapping to profiling-hosts // version 13: Added 'container.id' keyword mapping to profiling-events - public static final int INDEX_TEMPLATE_VERSION = 13; + public static final int INDEX_TEMPLATE_VERSION = 14; // history for individual indices / index templates. 
Only bump these for breaking changes that require to create a new index public static final int PROFILING_EVENTS_VERSION = 5; diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/AttributeSet.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/AttributeSet.java index 0ee291af29ae1..a44764dab2a38 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/AttributeSet.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/AttributeSet.java @@ -113,7 +113,7 @@ public T[] toArray(T[] a) { @Override public boolean add(Attribute e) { - return delegate.put(e, PRESENT) != null; + return delegate.put(e, PRESENT) == null; } @Override diff --git a/x-pack/plugin/repositories-metering-api/qa/azure/build.gradle b/x-pack/plugin/repositories-metering-api/qa/azure/build.gradle index 01264d7849680..43c78bfc887b7 100644 --- a/x-pack/plugin/repositories-metering-api/qa/azure/build.gradle +++ b/x-pack/plugin/repositories-metering-api/qa/azure/build.gradle @@ -5,8 +5,6 @@ * 2.0. */ -import org.elasticsearch.gradle.internal.info.BuildParams - apply plugin: 'elasticsearch.internal-java-rest-test' dependencies { @@ -38,7 +36,7 @@ tasks.named("javaRestTest") { systemProperty 'test.azure.container', azureContainer systemProperty 'test.azure.key', azureKey systemProperty 'test.azure.sas_token', azureSasToken - nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_repositories_metering_tests_" + BuildParams.testSeed + nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_repositories_metering_tests_" + buildParams.testSeed } tasks.register("azureThirdPartyTest") { diff --git a/x-pack/plugin/repositories-metering-api/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/repositories/metering/azure/AzureRepositoriesMeteringIT.java b/x-pack/plugin/repositories-metering-api/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/repositories/metering/azure/AzureRepositoriesMeteringIT.java index 7029a38edcb5a..d21dc4b2982f1 100644 --- a/x-pack/plugin/repositories-metering-api/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/repositories/metering/azure/AzureRepositoriesMeteringIT.java +++ b/x-pack/plugin/repositories-metering-api/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/repositories/metering/azure/AzureRepositoriesMeteringIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.repositories.metering.azure; import fixture.azure.AzureHttpFixture; +import fixture.azure.MockAzureBlobStore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Booleans; @@ -37,7 +38,8 @@ public class AzureRepositoriesMeteringIT extends AbstractRepositoriesMeteringAPI AZURE_TEST_CONTAINER, System.getProperty("test.azure.tenant_id"), System.getProperty("test.azure.client_id"), - AzureHttpFixture.sharedKeyForAccountPredicate(AZURE_TEST_ACCOUNT) + AzureHttpFixture.sharedKeyForAccountPredicate(AZURE_TEST_ACCOUNT), + MockAzureBlobStore.LeaseExpiryPredicate.NEVER_EXPIRE ); private static TestTrustStore trustStore = new TestTrustStore( diff --git a/x-pack/plugin/repositories-metering-api/qa/gcs/build.gradle b/x-pack/plugin/repositories-metering-api/qa/gcs/build.gradle index b8c345c99b895..984590f42256c 100644 --- a/x-pack/plugin/repositories-metering-api/qa/gcs/build.gradle +++ b/x-pack/plugin/repositories-metering-api/qa/gcs/build.gradle @@ -5,8 +5,6 @@ * 2.0. 
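The one-character AttributeSet fix above is worth spelling out: for a Set view backed by a Map, add() must report true when the element was newly inserted, and Map.put returns the previous value, so null is the "was absent" signal. A tiny runnable sketch of the corrected contract:

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Distills the AttributeSet.add() fix: a Map-backed set reports "added" when
// put(...) returns null (no previous mapping), not when it returns non-null.
public class MapBackedSetAdd {
    private static final Object PRESENT = new Object();
    private final Map<String, Object> delegate = new LinkedHashMap<>();

    public boolean add(String e) {
        // Correct: Map.put returns the previous value, so null means the element was new.
        return delegate.put(e, PRESENT) == null;
    }

    public static void main(String[] args) {
        MapBackedSetAdd set = new MapBackedSetAdd();
        System.out.println(set.add("a")); // true  (newly added)
        System.out.println(set.add("a")); // false (already present)
    }
}
```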
*/ -import org.elasticsearch.gradle.internal.info.BuildParams - apply plugin: 'elasticsearch.internal-java-rest-test' dependencies { @@ -35,7 +33,7 @@ if (!gcsServiceAccount && !gcsBucket && !gcsBasePath) { tasks.named("javaRestTest").configure { systemProperty 'test.google.fixture', Boolean.toString(useFixture) systemProperty 'test.gcs.bucket', gcsBucket - nonInputProperties.systemProperty 'test.gcs.base_path', gcsBasePath + "_repositories_metering" + BuildParams.testSeed + nonInputProperties.systemProperty 'test.gcs.base_path', gcsBasePath + "_repositories_metering" + buildParams.testSeed if (useFixture == false) { systemProperty 'test.google.account', serviceAccountFile } diff --git a/x-pack/plugin/repositories-metering-api/qa/s3/build.gradle b/x-pack/plugin/repositories-metering-api/qa/s3/build.gradle index 5f2bf66f31b21..5c83e8980a474 100644 --- a/x-pack/plugin/repositories-metering-api/qa/s3/build.gradle +++ b/x-pack/plugin/repositories-metering-api/qa/s3/build.gradle @@ -1,5 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE -import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.rest-resources' @@ -38,7 +44,7 @@ tasks.named("javaRestTest").configure { systemProperty 'test.s3.bucket', s3Bucket systemProperty("s3AccessKey", s3AccessKey) systemProperty("s3SecretKey", s3SecretKey) - nonInputProperties.systemProperty 'test.s3.base_path', s3BasePath ? s3BasePath + "_repositories_metering" + BuildParams.testSeed : 'base_path_integration_tests' + nonInputProperties.systemProperty 'test.s3.base_path', s3BasePath ? 
s3BasePath + "_repositories_metering" + buildParams.testSeed : 'base_path_integration_tests' } tasks.register("s3ThirdPartyTest").configure { dependsOn "javaRestTest" diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java index ba25a774ff540..e33c1cc30f355 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java @@ -444,20 +444,14 @@ private static InternalAggregation unrollMultiBucket( long key = ((InternalDateHistogram) rolled).getKey(bucket).longValue(); DocValueFormat formatter = ((InternalDateHistogram.Bucket) bucket).getFormatter(); assert bucketCount >= 0; - return new InternalDateHistogram.Bucket( - key, - bucketCount, - ((InternalDateHistogram.Bucket) bucket).getKeyed(), - formatter, - subAggs - ); + return new InternalDateHistogram.Bucket(key, bucketCount, formatter, subAggs); }); } else if (rolled instanceof InternalHistogram) { return unrollMultiBucket(rolled, original, currentTree, (bucket, bucketCount, subAggs) -> { long key = ((InternalHistogram) rolled).getKey(bucket).longValue(); DocValueFormat formatter = ((InternalHistogram.Bucket) bucket).getFormatter(); assert bucketCount >= 0; - return new InternalHistogram.Bucket(key, bucketCount, ((InternalHistogram.Bucket) bucket).getKeyed(), formatter, subAggs); + return new InternalHistogram.Bucket(key, bucketCount, formatter, subAggs); }); } else if (rolled instanceof StringTerms) { return unrollMultiBucket(rolled, original, currentTree, (bucket, bucketCount, subAggs) -> { diff --git a/x-pack/plugin/searchable-snapshots/qa/azure/build.gradle b/x-pack/plugin/searchable-snapshots/qa/azure/build.gradle index 6774ef920f280..de5ec42147d3f 100644 --- a/x-pack/plugin/searchable-snapshots/qa/azure/build.gradle +++ b/x-pack/plugin/searchable-snapshots/qa/azure/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ apply plugin: 'elasticsearch.internal-java-rest-test' @@ -33,7 +38,7 @@ tasks.named("javaRestTest").configure { systemProperty 'test.azure.container', azureContainer systemProperty 'test.azure.key', azureKey systemProperty 'test.azure.sas_token', azureSasToken - nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_searchable_snapshots_tests_" + BuildParams.testSeed + nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_searchable_snapshots_tests_" + buildParams.testSeed } tasks.register("azureThirdPartyTest") { diff --git a/x-pack/plugin/searchable-snapshots/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/AzureSearchableSnapshotsIT.java b/x-pack/plugin/searchable-snapshots/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/AzureSearchableSnapshotsIT.java index 610b58453716c..f65db6dab1e68 100644 --- a/x-pack/plugin/searchable-snapshots/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/AzureSearchableSnapshotsIT.java +++ b/x-pack/plugin/searchable-snapshots/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/AzureSearchableSnapshotsIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.searchablesnapshots; import fixture.azure.AzureHttpFixture; +import fixture.azure.MockAzureBlobStore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Booleans; @@ -38,7 +39,8 @@ public class AzureSearchableSnapshotsIT extends AbstractSearchableSnapshotsRestT AZURE_TEST_CONTAINER, System.getProperty("test.azure.tenant_id"), System.getProperty("test.azure.client_id"), - AzureHttpFixture.sharedKeyForAccountPredicate(AZURE_TEST_ACCOUNT) + AzureHttpFixture.sharedKeyForAccountPredicate(AZURE_TEST_ACCOUNT), + MockAzureBlobStore.LeaseExpiryPredicate.NEVER_EXPIRE ); private static TestTrustStore trustStore = new TestTrustStore( diff --git a/x-pack/plugin/searchable-snapshots/qa/gcs/build.gradle b/x-pack/plugin/searchable-snapshots/qa/gcs/build.gradle index 3099f0787998e..0340453d0840b 100644 --- a/x-pack/plugin/searchable-snapshots/qa/gcs/build.gradle +++ b/x-pack/plugin/searchable-snapshots/qa/gcs/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ apply plugin: 'elasticsearch.internal-java-rest-test' @@ -29,7 +34,7 @@ if (!gcsServiceAccount && !gcsBucket && !gcsBasePath) { tasks.named("javaRestTest").configure { systemProperty 'test.google.fixture', Boolean.toString(useFixture) systemProperty 'test.gcs.bucket', gcsBucket - nonInputProperties.systemProperty 'test.gcs.base_path', gcsBasePath + "_searchable_snapshots_tests" + BuildParams.testSeed + nonInputProperties.systemProperty 'test.gcs.base_path', gcsBasePath + "_searchable_snapshots_tests" + buildParams.testSeed if (useFixture == false) { systemProperty 'test.google.account', serviceAccountFile diff --git a/x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle b/x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle index 52ea873ae53bf..b41e0f8dcc1cf 100644 --- a/x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle +++ b/x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle @@ -5,8 +5,6 @@ * 2.0. 
*/ -import org.elasticsearch.gradle.internal.info.BuildParams - apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.rest-resources' apply plugin: 'elasticsearch.internal-available-ports' @@ -28,6 +26,6 @@ restResources { tasks.named("javaRestTest").configure { usesDefaultDistribution() - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) jvmArgs '--add-exports', 'java.security.jgss/sun.security.krb5=ALL-UNNAMED' } diff --git a/x-pack/plugin/searchable-snapshots/qa/minio/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/minio/MinioSearchableSnapshotsIT.java b/x-pack/plugin/searchable-snapshots/qa/minio/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/minio/MinioSearchableSnapshotsIT.java index 5c2b19fe75a07..53f1a9a88e10e 100644 --- a/x-pack/plugin/searchable-snapshots/qa/minio/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/minio/MinioSearchableSnapshotsIT.java +++ b/x-pack/plugin/searchable-snapshots/qa/minio/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/minio/MinioSearchableSnapshotsIT.java @@ -21,7 +21,12 @@ @ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) public class MinioSearchableSnapshotsIT extends AbstractSearchableSnapshotsRestTestCase { - public static final MinioTestContainer minioFixture = new MinioTestContainer(); + public static final MinioTestContainer minioFixture = new MinioTestContainer( + true, + "s3_test_access_key", + "s3_test_secret_key", + "bucket" + ); public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .distribution(DistributionType.DEFAULT) diff --git a/x-pack/plugin/searchable-snapshots/qa/s3/build.gradle b/x-pack/plugin/searchable-snapshots/qa/s3/build.gradle index 8919ddc6d29fd..1659c592e5e64 100644 --- a/x-pack/plugin/searchable-snapshots/qa/s3/build.gradle +++ b/x-pack/plugin/searchable-snapshots/qa/s3/build.gradle @@ -1,5 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE -import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.rest-resources' @@ -40,7 +46,7 @@ tasks.named("javaRestTest").configure { systemProperty("s3AccessKey", s3AccessKey) systemProperty("s3SecretKey", s3SecretKey) - nonInputProperties.systemProperty 'test.s3.base_path', s3BasePath ? s3BasePath + "_searchable_snapshots_tests" + BuildParams.testSeed : 'base_path_integration_tests' + nonInputProperties.systemProperty 'test.s3.base_path', s3BasePath ? 
s3BasePath + "_searchable_snapshots_tests" + buildParams.testSeed : 'base_path_integration_tests' } tasks.register("s3ThirdPartyTest") { diff --git a/x-pack/plugin/searchable-snapshots/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsCredentialsReloadIT.java b/x-pack/plugin/searchable-snapshots/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsCredentialsReloadIT.java new file mode 100644 index 0000000000000..989e5468c4fb3 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsCredentialsReloadIT.java @@ -0,0 +1,288 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.searchablesnapshots.s3; + +import fixture.s3.S3HttpFixture; +import io.netty.handler.codec.http.HttpMethod; + +import org.apache.http.client.methods.HttpPut; +import org.apache.http.entity.ByteArrayEntity; +import org.apache.http.entity.ContentType; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.WarningsHandler; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.MutableSettingsProvider; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.ObjectPath; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.function.UnaryOperator; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.Matchers.allOf; + +public class S3SearchableSnapshotsCredentialsReloadIT extends ESRestTestCase { + + private static final String BUCKET = "S3SearchableSnapshotsCredentialsReloadIT-bucket"; + private static final String BASE_PATH = "S3SearchableSnapshotsCredentialsReloadIT-base-path"; + + private static volatile String repositoryAccessKey; + + public static final S3HttpFixture s3Fixture = new S3HttpFixture( + true, + BUCKET, + BASE_PATH, + S3HttpFixture.mutableAccessKey(() -> repositoryAccessKey) + ); + + private static final MutableSettingsProvider keystoreSettings = new MutableSettingsProvider(); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .setting("xpack.license.self_generated.type", "trial") + .keystore(keystoreSettings) + .setting("xpack.searchable.snapshot.shared_cache.size", "4kB") + .setting("xpack.searchable.snapshot.shared_cache.region_size", "4kB") + .setting("xpack.searchable_snapshots.cache_fetch_async_thread_pool.keep_alive", "0ms") + .setting("xpack.security.enabled", "false") + .systemProperty("es.allow_insecure_settings", "true") + .build(); + + @ClassRule + public static TestRule ruleChain = 
RuleChain.outerRule(s3Fixture).around(cluster); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Before + public void skipFips() { + assumeFalse("getting these tests to run in a FIPS JVM is kinda fiddly and we don't really need the extra coverage", inFipsJvm()); + } + + public void testReloadCredentialsFromKeystore() throws IOException { + final TestHarness testHarness = new TestHarness(); + testHarness.putRepository(); + + // Set up initial credentials + final String accessKey1 = randomIdentifier(); + repositoryAccessKey = accessKey1; + keystoreSettings.put("s3.client.default.access_key", accessKey1); + keystoreSettings.put("s3.client.default.secret_key", randomIdentifier()); + cluster.updateStoredSecureSettings(); + assertOK(client().performRequest(new Request("POST", "/_nodes/reload_secure_settings"))); + + testHarness.createFrozenSearchableSnapshotIndex(); + + // Verify searchable snapshot functionality + testHarness.ensureSearchSuccess(); + + // Rotate credentials in blob store + logger.info("--> rotate credentials"); + final String accessKey2 = randomValueOtherThan(accessKey1, ESTestCase::randomIdentifier); + repositoryAccessKey = accessKey2; + + // Ensure searchable snapshot now does not work due to invalid credentials + logger.info("--> expect failure"); + testHarness.ensureSearchFailure(); + + // Set up refreshed credentials + logger.info("--> update keystore contents"); + keystoreSettings.put("s3.client.default.access_key", accessKey2); + cluster.updateStoredSecureSettings(); + assertOK(client().performRequest(new Request("POST", "/_nodes/reload_secure_settings"))); + + // Check access using refreshed credentials + logger.info("--> expect success"); + testHarness.ensureSearchSuccess(); + } + + public void testReloadCredentialsFromAlternativeClient() throws IOException { + final TestHarness testHarness = new TestHarness(); + testHarness.putRepository(); + + // Set up credentials + final String accessKey1 = randomIdentifier(); + final String accessKey2 = randomValueOtherThan(accessKey1, ESTestCase::randomIdentifier); + final String alternativeClient = randomValueOtherThan("default", ESTestCase::randomIdentifier); + + repositoryAccessKey = accessKey1; + keystoreSettings.put("s3.client.default.access_key", accessKey1); + keystoreSettings.put("s3.client.default.secret_key", randomIdentifier()); + keystoreSettings.put("s3.client." + alternativeClient + ".access_key", accessKey2); + keystoreSettings.put("s3.client." 
+ alternativeClient + ".secret_key", randomIdentifier()); + cluster.updateStoredSecureSettings(); + assertOK(client().performRequest(new Request("POST", "/_nodes/reload_secure_settings"))); + + testHarness.createFrozenSearchableSnapshotIndex(); + + // Verify searchable snapshot functionality + testHarness.ensureSearchSuccess(); + + // Rotate credentials in blob store + logger.info("--> rotate credentials"); + repositoryAccessKey = accessKey2; + + // Ensure searchable snapshot now does not work due to invalid credentials + logger.info("--> expect failure"); + testHarness.ensureSearchFailure(); + + // Adjust repository to use new client + logger.info("--> update repository metadata"); + testHarness.putRepository(b -> b.put("client", alternativeClient)); + + // Check access using refreshed credentials + logger.info("--> expect success"); + testHarness.ensureSearchSuccess(); + } + + public void testReloadCredentialsFromMetadata() throws IOException { + final TestHarness testHarness = new TestHarness(); + testHarness.warningsHandler = WarningsHandler.PERMISSIVE; + + // Set up credentials + final String accessKey1 = randomIdentifier(); + final String accessKey2 = randomValueOtherThan(accessKey1, ESTestCase::randomIdentifier); + + testHarness.putRepository(b -> b.put("access_key", accessKey1).put("secret_key", randomIdentifier())); + repositoryAccessKey = accessKey1; + + testHarness.createFrozenSearchableSnapshotIndex(); + + // Verify searchable snapshot functionality + testHarness.ensureSearchSuccess(); + + // Rotate credentials in blob store + logger.info("--> rotate credentials"); + repositoryAccessKey = accessKey2; + + // Ensure searchable snapshot now does not work due to invalid credentials + logger.info("--> expect failure"); + testHarness.ensureSearchFailure(); + + // Adjust repository to use new client + logger.info("--> update repository metadata"); + testHarness.putRepository(b -> b.put("access_key", accessKey2).put("secret_key", randomIdentifier())); + + // Check access using refreshed credentials + logger.info("--> expect success"); + testHarness.ensureSearchSuccess(); + } + + private class TestHarness { + private final String mountedIndexName = randomIdentifier(); + private final String repositoryName = randomIdentifier(); + + @Nullable // to use the default + WarningsHandler warningsHandler; + + void putRepository() throws IOException { + putRepository(UnaryOperator.identity()); + } + + void putRepository(UnaryOperator settingsOperator) throws IOException { + // Register repository + final Request request = newXContentRequest( + HttpMethod.PUT, + "/_snapshot/" + repositoryName, + (b, p) -> b.field("type", "s3") + .startObject("settings") + .value( + settingsOperator.apply( + Settings.builder().put("bucket", BUCKET).put("base_path", BASE_PATH).put("endpoint", s3Fixture.getAddress()) + ).build() + ) + .endObject() + ); + request.addParameter("verify", "false"); // because we don't have access to the blob store yet + request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warningsHandler)); + assertOK(client().performRequest(request)); + } + + void createFrozenSearchableSnapshotIndex() throws IOException { + // Create an index, large enough that its data is not all captured in the file headers + final String indexName = randomValueOtherThan(mountedIndexName, ESTestCase::randomIdentifier); + createIndex(indexName, indexSettings(1, 0).build()); + try (var bodyStream = new ByteArrayOutputStream()) { + for (int i = 0; i < 1024; i++) { + try (XContentBuilder 
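The createFrozenSearchableSnapshotIndex helper in the new IT builds its _bulk request body as NDJSON: an action line, a newline (0x0a), a source line, another newline, repeated per document. A stripped-down sketch of that body construction, using plain strings where the test uses XContentBuilder:

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

// Sketch of the NDJSON _bulk body the test assembles: one JSON object per line,
// alternating action and source lines, each terminated by a 0x0a newline.
public class BulkBodySketch {
    static byte[] bulkBody(int docs) throws IOException {
        ByteArrayOutputStream bodyStream = new ByteArrayOutputStream();
        for (int i = 0; i < docs; i++) {
            bodyStream.write("{\"index\":{}}".getBytes(StandardCharsets.UTF_8));
            bodyStream.write(0x0a); // newline after the action line
            bodyStream.write("{\"foo\":\"bar\"}".getBytes(StandardCharsets.UTF_8));
            bodyStream.write(0x0a); // newline after the source line
        }
        return bodyStream.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        System.out.print(new String(bulkBody(2), StandardCharsets.UTF_8));
    }
}
```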
+                        bodyLineBuilder.startObject().startObject("index").endObject().endObject();
+                    }
+                    bodyStream.write(0x0a);
+                    try (XContentBuilder bodyLineBuilder = new XContentBuilder(XContentType.JSON.xContent(), bodyStream)) {
+                        bodyLineBuilder.startObject().field("foo", "bar").endObject();
+                    }
+                    bodyStream.write(0x0a);
+                }
+                bodyStream.flush();
+                final Request request = new Request("PUT", indexName + "/_bulk");
+                request.setEntity(new ByteArrayEntity(bodyStream.toByteArray(), ContentType.APPLICATION_JSON));
+                client().performRequest(request);
+            }
+
+            // Take a snapshot and delete the original index
+            final String snapshotName = randomIdentifier();
+            final Request createSnapshotRequest = new Request(HttpPut.METHOD_NAME, "_snapshot/" + repositoryName + '/' + snapshotName);
+            createSnapshotRequest.addParameter("wait_for_completion", "true");
+            createSnapshotRequest.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warningsHandler));
+            assertOK(client().performRequest(createSnapshotRequest));
+
+            deleteIndex(indexName);
+
+            // Mount the snapshotted index as a searchable snapshot
+            final Request mountRequest = newXContentRequest(
+                HttpMethod.POST,
+                "/_snapshot/" + repositoryName + "/" + snapshotName + "/_mount",
+                (b, p) -> b.field("index", indexName).field("renamed_index", mountedIndexName)
+            );
+            mountRequest.addParameter("wait_for_completion", "true");
+            mountRequest.addParameter("storage", "shared_cache");
+            assertOK(client().performRequest(mountRequest));
+            ensureGreen(mountedIndexName);
+        }
+
+        void ensureSearchSuccess() throws IOException {
+            final Request searchRequest = new Request("GET", mountedIndexName + "/_search");
+            searchRequest.addParameter("size", "10000");
+            assertEquals(
+                "bar",
+                ObjectPath.createFromResponse(assertOK(client().performRequest(searchRequest))).evaluate("hits.hits.0._source.foo")
+            );
+        }
+
+        void ensureSearchFailure() throws IOException {
+            assertOK(client().performRequest(new Request("POST", "/_searchable_snapshots/cache/clear")));
+            final Request searchRequest = new Request("GET", mountedIndexName + "/_search");
+            searchRequest.addParameter("size", "10000");
+            assertThat(
+                expectThrows(ResponseException.class, () -> client().performRequest(searchRequest)).getMessage(),
+                allOf(
+                    containsString("Access denied"),
+                    containsString("Status Code: 403"),
+                    containsString("Error Code: AccessDenied"),
+                    containsString("failed to read data from cache")
+                )
+            );
+        }
+    }
+
+}
diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotEnableAllocationDeciderIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotEnableAllocationDeciderIntegTests.java
index 9dadb75e87cef..c378fef9428ba 100644
--- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotEnableAllocationDeciderIntegTests.java
+++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotEnableAllocationDeciderIntegTests.java
@@ -15,7 +15,6 @@
 import org.elasticsearch.snapshots.SnapshotId;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.xpack.searchablesnapshots.BaseSearchableSnapshotsIntegTestCase;
-import org.elasticsearch.xpack.searchablesnapshots.allocation.decider.SearchableSnapshotEnableAllocationDecider;
 import org.hamcrest.Matchers;
 
 import java.util.List;
@@ -31,9 +30,7 @@ public void testAllocationDisabled() throws Exception {
         final String restoredIndexName = setupMountedIndex();
         int numPrimaries = getNumShards(restoredIndexName).numPrimaries;
         setEnableAllocation(EnableAllocationDecider.Allocation.PRIMARIES);
-        if (randomBoolean()) {
-            setAllocateOnRollingRestart(false);
-        }
+
         Set<String> indexNodes = internalCluster().nodesInclude(restoredIndexName);
         for (String indexNode : indexNodes) {
             internalCluster().restartNode(indexNode);
@@ -43,16 +40,13 @@ public void testAllocationDisabled() throws Exception {
             .actionGet();
         assertThat(response.getUnassignedShards(), Matchers.equalTo(numPrimaries));
-        setAllocateOnRollingRestart(true);
+        setEnableAllocation(null);
         ensureGreen(restoredIndexName);
     }
 
     public void testAllocateOnRollingRestartEnabled() throws Exception {
         final String restoredIndexName = setupMountedIndex();
-        if (randomBoolean()) {
-            setEnableAllocation(EnableAllocationDecider.Allocation.PRIMARIES);
-        }
-        setAllocateOnRollingRestart(true);
+        setEnableAllocation(null);
         Set<String> indexNodes = internalCluster().nodesInclude(restoredIndexName);
         for (String indexNode : indexNodes) {
             internalCluster().restartNode(indexNode);
@@ -74,14 +68,7 @@ private String setupMountedIndex() throws Exception {
     }
 
     public void setEnableAllocation(EnableAllocationDecider.Allocation allocation) {
-        setSetting(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, allocation.name());
-    }
-
-    public void setAllocateOnRollingRestart(boolean allocateOnRollingRestart) {
-        setSetting(
-            SearchableSnapshotEnableAllocationDecider.SEARCHABLE_SNAPSHOTS_ALLOCATE_ON_ROLLING_RESTART,
-            Boolean.toString(allocateOnRollingRestart)
-        );
+        setSetting(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, allocation != null ? allocation.name() : null);
     }
 
     private void setSetting(Setting<?> setting, String value) {
diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java
index eabdf7c9bf46c..33982536cd634 100644
--- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java
+++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java
@@ -304,7 +304,6 @@ public List<Setting<?>> getSettings() {
             CacheService.SNAPSHOT_CACHE_SYNC_INTERVAL_SETTING,
             CacheService.SNAPSHOT_CACHE_MAX_FILES_TO_SYNC_AT_ONCE_SETTING,
             CacheService.SNAPSHOT_CACHE_SYNC_SHUTDOWN_TIMEOUT,
-            SearchableSnapshotEnableAllocationDecider.SEARCHABLE_SNAPSHOTS_ALLOCATE_ON_ROLLING_RESTART,
             BlobStoreCacheMaintenanceService.SNAPSHOT_SNAPSHOT_CLEANUP_INTERVAL_SETTING,
             BlobStoreCacheMaintenanceService.SNAPSHOT_SNAPSHOT_CLEANUP_KEEP_ALIVE_SETTING,
             BlobStoreCacheMaintenanceService.SNAPSHOT_SNAPSHOT_CLEANUP_BATCH_SIZE_SETTING,
@@ -544,9 +543,9 @@ public Map getRecoveryStateFactories() {
         return Map.of(SNAPSHOT_RECOVERY_STATE_FACTORY_KEY, SearchableSnapshotRecoveryState::new);
     }
 
-    public static final String CACHE_FETCH_ASYNC_THREAD_POOL_NAME = "searchable_snapshots_cache_fetch_async";
+    public static final String CACHE_FETCH_ASYNC_THREAD_POOL_NAME = BlobStoreRepository.SEARCHABLE_SNAPSHOTS_CACHE_FETCH_ASYNC_THREAD_NAME;
     public static final String CACHE_FETCH_ASYNC_THREAD_POOL_SETTING = "xpack.searchable_snapshots.cache_fetch_async_thread_pool";
-    public static final String CACHE_PREWARMING_THREAD_POOL_NAME = "searchable_snapshots_cache_prewarming";
+    public static final String CACHE_PREWARMING_THREAD_POOL_NAME = BlobStoreRepository.SEARCHABLE_SNAPSHOTS_CACHE_PREWARMING_THREAD_NAME;
     public static final String CACHE_PREWARMING_THREAD_POOL_SETTING = "xpack.searchable_snapshots.cache_prewarming_thread_pool";
 
     public static ScalingExecutorBuilder[] executorBuilders(Settings settings) {
diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotEnableAllocationDecider.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotEnableAllocationDecider.java
index 1e360fc2f3503..b6a301a01c782 100644
--- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotEnableAllocationDecider.java
+++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/decider/SearchableSnapshotEnableAllocationDecider.java
@@ -15,50 +15,26 @@
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
 import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
 import org.elasticsearch.common.settings.ClusterSettings;
-import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.core.UpdateForV9;
 
 public class SearchableSnapshotEnableAllocationDecider extends AllocationDecider {
 
     static final String NAME = "searchable_snapshots_enable";
 
-    /**
-     * This setting describes whether searchable snapshots are allocated during rolling restarts. For now, whether a rolling restart is
-     * ongoing is determined by cluster.routing.allocation.enable=primaries. Notice that other values for that setting except "all" mean
-     * that no searchable snapshots are allocated anyway.
-     */
-    @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION)
-    // xpack.searchable.snapshot.allocate_on_rolling_restart was only temporary, remove it in the next major
-    public static final Setting<Boolean> SEARCHABLE_SNAPSHOTS_ALLOCATE_ON_ROLLING_RESTART = Setting.boolSetting(
-        "xpack.searchable.snapshot.allocate_on_rolling_restart",
-        false,
-        Setting.Property.Dynamic,
-        Setting.Property.NodeScope,
-        Setting.Property.Deprecated
-    );
-
     private volatile EnableAllocationDecider.Allocation enableAllocation;
-    private volatile boolean allocateOnRollingRestart;
 
     public SearchableSnapshotEnableAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
         this.enableAllocation = EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.get(settings);
-        this.allocateOnRollingRestart = SEARCHABLE_SNAPSHOTS_ALLOCATE_ON_ROLLING_RESTART.get(settings);
         clusterSettings.addSettingsUpdateConsumer(
             EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING,
             this::setEnableAllocation
         );
-        clusterSettings.addSettingsUpdateConsumer(SEARCHABLE_SNAPSHOTS_ALLOCATE_ON_ROLLING_RESTART, this::setAllocateOnRollingRestart);
     }
 
     private void setEnableAllocation(EnableAllocationDecider.Allocation allocation) {
         this.enableAllocation = allocation;
     }
 
-    private void setAllocateOnRollingRestart(boolean allocateOnRollingRestart) {
-        this.allocateOnRollingRestart = allocateOnRollingRestart;
-    }
-
     @Override
     public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
         return canAllocate(shardRouting, allocation);
@@ -73,25 +49,14 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocat
         final IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(shardRouting.index());
         if (indexMetadata.isSearchableSnapshot()) {
             EnableAllocationDecider.Allocation enableAllocationCopy = this.enableAllocation;
-            boolean allocateOnRollingRestartCopy = this.allocateOnRollingRestart;
             if (enableAllocationCopy == EnableAllocationDecider.Allocation.PRIMARIES) {
-                if (allocateOnRollingRestartCopy == false) {
-                    return allocation.decision(
-                        Decision.NO,
-                        NAME,
-                        "no allocations of searchable snapshots allowed during rolling restart due to [%s=%s] and [%s=false]",
-                        EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(),
-                        enableAllocationCopy,
-                        SEARCHABLE_SNAPSHOTS_ALLOCATE_ON_ROLLING_RESTART.getKey()
-                    );
-                } else {
-                    return allocation.decision(
-                        Decision.YES,
-                        NAME,
-                        "allocate on rolling restart enabled [%s=true]",
-                        SEARCHABLE_SNAPSHOTS_ALLOCATE_ON_ROLLING_RESTART.getKey()
-                    );
-                }
+                return allocation.decision(
+                    Decision.NO,
+                    NAME,
+                    "no allocations of searchable snapshots allowed during rolling restart due to [%s=%s]",
+                    EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(),
+                    enableAllocationCopy
+                );
             } else {
                 return allocation.decision(
                     Decision.YES,
diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/BlobContainerSupplier.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/BlobContainerSupplier.java
new file mode 100644
index 0000000000000..335c8e311ace6
--- /dev/null
+++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/BlobContainerSupplier.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.searchablesnapshots.store;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.common.blobstore.BlobContainer;
+import org.elasticsearch.common.blobstore.OperationPurpose;
+import org.elasticsearch.common.blobstore.support.FilterBlobContainer;
+import org.elasticsearch.repositories.IndexId;
+import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.function.Supplier;
+
+public class BlobContainerSupplier implements Supplier<BlobContainer> {
+
+    private static final Logger logger = LogManager.getLogger(BlobContainerSupplier.class);
+
+    private final Supplier<BlobStoreRepository> repositorySupplier;
+    private final IndexId indexId;
+    private final int shardId;
+
+    private volatile LastKnownState lastKnownState = new LastKnownState(null, null);
+
+    public BlobContainerSupplier(Supplier<BlobStoreRepository> repositorySupplier, IndexId indexId, int shardId) {
+        this.repositorySupplier = repositorySupplier;
+        this.indexId = indexId;
+        this.shardId = shardId;
+    }
+
+    @Override
+    public BlobContainer get() {
+        final LastKnownState lastKnownState = this.lastKnownState;
+        final BlobStoreRepository currentRepository = repositorySupplier.get();
+
+        if (lastKnownState.blobStoreRepository() == currentRepository) {
+            return lastKnownState.blobContainer();
+        } else {
+            return refreshAndGet();
+        }
+    }
+
+    private synchronized BlobContainer refreshAndGet() {
+        final BlobStoreRepository currentRepository = repositorySupplier.get();
+        if (lastKnownState.blobStoreRepository() == currentRepository) {
+            return lastKnownState.blobContainer();
+        } else {
+            logger.debug("creating new blob container [{}][{}][{}]", currentRepository.getMetadata().name(), indexId, shardId);
+            final BlobContainer newContainer = new RateLimitingBlobContainer(
+                currentRepository,
+                currentRepository.shardContainer(indexId, shardId)
+            );
+            lastKnownState = new LastKnownState(currentRepository, newContainer);
+            return newContainer;
+        }
+    }
+
+    private record LastKnownState(BlobStoreRepository blobStoreRepository, BlobContainer blobContainer) {}
+
+    /**
+     * A {@link FilterBlobContainer} that uses {@link BlobStoreRepository#maybeRateLimitRestores(InputStream)} to limit the rate at which
+     * blobs are read from the repository.
+     */
+    private static class RateLimitingBlobContainer extends FilterBlobContainer {
+
+        private final BlobStoreRepository blobStoreRepository;
+
+        RateLimitingBlobContainer(BlobStoreRepository blobStoreRepository, BlobContainer blobContainer) {
+            super(blobContainer);
+            this.blobStoreRepository = blobStoreRepository;
+        }
+
+        @Override
+        protected BlobContainer wrapChild(BlobContainer child) {
+            return new RateLimitingBlobContainer(blobStoreRepository, child);
+        }
+
+        @Override
+        public InputStream readBlob(OperationPurpose purpose, String blobName) throws IOException {
+            return blobStoreRepository.maybeRateLimitRestores(super.readBlob(purpose, blobName));
+        }
+
+        @Override
+        public InputStream readBlob(OperationPurpose purpose, String blobName, long position, long length) throws IOException {
+            return blobStoreRepository.maybeRateLimitRestores(super.readBlob(purpose, blobName, position, length));
+        }
+    }
+}
diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/RepositorySupplier.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/RepositorySupplier.java
new file mode 100644
index 0000000000000..63522ce2309a1
--- /dev/null
+++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/RepositorySupplier.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.searchablesnapshots.store;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.core.Nullable;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.repositories.Repository;
+import org.elasticsearch.repositories.RepositoryMissingException;
+import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
+import org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots;
+
+import java.util.Map;
+import java.util.Objects;
+import java.util.function.Supplier;
+
+public class RepositorySupplier implements Supplier<BlobStoreRepository> {
+
+    private static final Logger logger = LogManager.getLogger(RepositorySupplier.class);
+
+    private final RepositoriesService repositoriesService;
+
+    private final String repositoryName;
+
+    @Nullable // if repository specified only by name
+    private final String repositoryUuid;
+
+    private volatile String repositoryNameHint;
+
+    public RepositorySupplier(RepositoriesService repositoriesService, String repositoryName, String repositoryUuid) {
+        this.repositoriesService = Objects.requireNonNull(repositoriesService);
+        this.repositoryName = Objects.requireNonNull(repositoryName);
+        this.repositoryUuid = repositoryUuid;
+        this.repositoryNameHint = repositoryName;
+    }
+
+    @Override
+    public BlobStoreRepository get() {
+        return SearchableSnapshots.getSearchableRepository(getRepository());
+    }
+
+    private Repository getRepository() {
+        if (repositoryUuid == null) {
+            // repository containing pre-7.12 snapshots has no UUID so we assume it matches by name
+            final Repository repository = repositoriesService.repository(repositoryName);
+            assert repository.getMetadata().name().equals(repositoryName) : repository.getMetadata().name() + " vs " + repositoryName;
+            return repository;
+        }
+
+        final Map<String, Repository> repositoriesByName = repositoriesService.getRepositories();
+
+        final String currentRepositoryNameHint = repositoryNameHint;
+        final Repository repositoryByLastKnownName = repositoriesByName.get(currentRepositoryNameHint);
+        if (repositoryByLastKnownName != null) {
+            final var foundRepositoryUuid = repositoryByLastKnownName.getMetadata().uuid();
+            if (Objects.equals(repositoryUuid, foundRepositoryUuid)) {
+                return repositoryByLastKnownName;
+            }
+        }
+
+        for (final Repository repository : repositoriesByName.values()) {
+            if (repository.getMetadata().uuid().equals(repositoryUuid)) {
+                final var newRepositoryName = repository.getMetadata().name();
+                logger.debug(
+                    "getRepository: repository [{}] with uuid [{}] replacing repository [{}]",
+                    newRepositoryName,
+                    repositoryUuid,
+                    currentRepositoryNameHint
+                );
+                repositoryNameHint = newRepositoryName;
+                return repository;
+            }
+        }
+
+        throw new RepositoryMissingException("uuid [" + repositoryUuid + "], original name [" + repositoryName + "]");
+    }
+}
diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectory.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectory.java
index b56cd28e9dc6c..bbdf371e1ed7b 100644
--- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectory.java
+++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectory.java
@@ -24,8 +24,6 @@
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.routing.RecoverySource;
 import org.elasticsearch.common.blobstore.BlobContainer;
-import org.elasticsearch.common.blobstore.OperationPurpose;
-import org.elasticsearch.common.blobstore.support.FilterBlobContainer;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.lucene.store.ByteArrayIndexInput;
 import org.elasticsearch.common.settings.Settings;
@@ -43,8 +41,6 @@
 import org.elasticsearch.indices.recovery.RecoveryState;
 import org.elasticsearch.repositories.IndexId;
 import org.elasticsearch.repositories.RepositoriesService;
-import org.elasticsearch.repositories.Repository;
-import org.elasticsearch.repositories.RepositoryMissingException;
 import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
 import org.elasticsearch.snapshots.SnapshotId;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -62,7 +58,6 @@
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.InputStream;
 import java.io.UncheckedIOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -134,7 +129,6 @@ public class SearchableSnapshotDirectory extends BaseDirectory {
 
     // volatile fields are updated once under `this` lock, all together, iff loaded is not true.
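+    // Note that the blob container itself is no longer cached in a field of its own: it is re-resolved from
+    // blobContainerSupplier on each access (see BlobContainerSupplier) so that a repository change, such as
+    // rotated client credentials or a switch to a different client, can take effect without a remount.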
     private volatile BlobStoreIndexShardSnapshot snapshot;
-    private volatile BlobContainer blobContainer;
     private volatile boolean loaded;
     private volatile SearchableSnapshotRecoveryState recoveryState;
@@ -182,7 +176,6 @@ public SearchableSnapshotDirectory(
     private synchronized boolean invariant() {
         assert loaded != (snapshot == null);
-        assert loaded != (blobContainer == null);
         assert loaded != (recoveryState == null);
         return true;
     }
@@ -212,7 +205,6 @@ public boolean loadSnapshot(
         synchronized (this) {
             alreadyLoaded = this.loaded;
             if (alreadyLoaded == false) {
-                this.blobContainer = blobContainerSupplier.get();
                 this.snapshot = snapshotSupplier.get();
                 this.loaded = true;
                 cleanExistingRegularShardFiles();
@@ -226,14 +218,12 @@
         return alreadyLoaded == false;
     }
 
-    @Nullable
     public BlobContainer blobContainer() {
-        final BlobContainer blobContainer = this.blobContainer;
+        final BlobContainer blobContainer = blobContainerSupplier.get();
         assert blobContainer != null;
         return blobContainer;
     }
 
-    @Nullable
     public BlobStoreIndexShardSnapshot snapshot() {
         final BlobStoreIndexShardSnapshot snapshot = this.snapshot;
         assert snapshot != null;
@@ -590,23 +580,15 @@ public static Directory create(
             );
         }
 
-        Repository repository;
-        final String repositoryName;
-        if (SNAPSHOT_REPOSITORY_UUID_SETTING.exists(indexSettings.getSettings())) {
-            repository = repositoryByUuid(
-                repositories.getRepositories(),
-                SNAPSHOT_REPOSITORY_UUID_SETTING.get(indexSettings.getSettings()),
-                SNAPSHOT_REPOSITORY_NAME_SETTING.get(indexSettings.getSettings())
-            );
-            repositoryName = repository.getMetadata().name();
-        } else {
-            // repository containing pre-7.12 snapshots has no UUID so we assume it matches by name
-            repositoryName = SNAPSHOT_REPOSITORY_NAME_SETTING.get(indexSettings.getSettings());
-            repository = repositories.repository(repositoryName);
-            assert repository.getMetadata().name().equals(repositoryName) : repository.getMetadata().name() + " vs " + repositoryName;
-        }
+        final Supplier<BlobStoreRepository> repositorySupplier = new RepositorySupplier(
+            repositories,
+            SNAPSHOT_REPOSITORY_NAME_SETTING.get(indexSettings.getSettings()),
+            SNAPSHOT_REPOSITORY_UUID_SETTING.exists(indexSettings.getSettings())
+                ? SNAPSHOT_REPOSITORY_UUID_SETTING.get(indexSettings.getSettings())
+                : null
+        );
 
-        final BlobStoreRepository blobStoreRepository = SearchableSnapshots.getSearchableRepository(repository);
+        final BlobStoreRepository initialRepository = repositorySupplier.get();
 
         final IndexId indexId = new IndexId(
             SNAPSHOT_INDEX_NAME_SETTING.get(indexSettings.getSettings()),
@@ -617,14 +599,14 @@ public static Directory create(
             SNAPSHOT_SNAPSHOT_ID_SETTING.get(indexSettings.getSettings())
         );
 
-        final LazyInitializable<BlobContainer, RuntimeException> lazyBlobContainer = new LazyInitializable<>(
-            () -> new RateLimitingBlobContainer(
-                blobStoreRepository,
-                blobStoreRepository.shardContainer(indexId, shardPath.getShardId().id())
-            )
+        final Supplier<BlobContainer> blobContainerSupplier = new BlobContainerSupplier(
+            repositorySupplier,
+            indexId,
+            shardPath.getShardId().id()
         );
+
         final LazyInitializable<BlobStoreIndexShardSnapshot, RuntimeException> lazySnapshot = new LazyInitializable<>(
-            () -> blobStoreRepository.loadShardSnapshot(lazyBlobContainer.getOrCompute(), snapshotId)
+            () -> repositorySupplier.get().loadShardSnapshot(blobContainerSupplier.get(), snapshotId)
         );
 
         final Path cacheDir = CacheService.getShardCachePath(shardPath).resolve(snapshotId.getUUID());
@@ -632,10 +614,10 @@
 
         return new InMemoryNoOpCommitDirectory(
             new SearchableSnapshotDirectory(
-                lazyBlobContainer::getOrCompute,
+                blobContainerSupplier,
                 lazySnapshot::getOrCompute,
                 blobStoreCacheService,
-                repositoryName,
+                initialRepository.getMetadata().name(),
                 snapshotId,
                 indexId,
                 shardPath.getShardId(),
@@ -690,42 +672,4 @@ public void putCachedBlob(String name, ByteRange range, BytesReference content,
 
     public SharedBlobCacheService<CacheKey>.CacheFile getFrozenCacheFile(String fileName, long length) {
         return sharedBlobCacheService.getCacheFile(createCacheKey(fileName), length);
     }
-
-    private static Repository repositoryByUuid(Map<String, Repository> repositories, String repositoryUuid, String originalName) {
-        for (Repository repository : repositories.values()) {
-            if (repository.getMetadata().uuid().equals(repositoryUuid)) {
-                return repository;
-            }
-        }
-        throw new RepositoryMissingException("uuid [" + repositoryUuid + "], original name [" + originalName + "]");
-    }
-
-    /**
-     * A {@link FilterBlobContainer} that uses {@link BlobStoreRepository#maybeRateLimitRestores(InputStream)} to limit the rate at which
-     * blobs are read from the repository.
-     */
-    private static class RateLimitingBlobContainer extends FilterBlobContainer {
-
-        private final BlobStoreRepository blobStoreRepository;
-
-        RateLimitingBlobContainer(BlobStoreRepository blobStoreRepository, BlobContainer blobContainer) {
-            super(blobContainer);
-            this.blobStoreRepository = blobStoreRepository;
-        }
-
-        @Override
-        protected BlobContainer wrapChild(BlobContainer child) {
-            return new RateLimitingBlobContainer(blobStoreRepository, child);
-        }
-
-        @Override
-        public InputStream readBlob(OperationPurpose purpose, String blobName) throws IOException {
-            return blobStoreRepository.maybeRateLimitRestores(super.readBlob(purpose, blobName));
-        }
-
-        @Override
-        public InputStream readBlob(OperationPurpose purpose, String blobName, long position, long length) throws IOException {
-            return blobStoreRepository.maybeRateLimitRestores(super.readBlob(purpose, blobName, position, length));
-        }
-    }
 }
diff --git a/x-pack/plugin/security/cli/build.gradle b/x-pack/plugin/security/cli/build.gradle
index dcf3c7305dbc7..d450a38dd1d29 100644
--- a/x-pack/plugin/security/cli/build.gradle
+++ b/x-pack/plugin/security/cli/build.gradle
@@ -1,5 +1,11 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
 import org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask
-import org.elasticsearch.gradle.internal.info.BuildParams
 
 apply plugin: 'elasticsearch.build'
@@ -36,7 +42,7 @@ tasks.named("test").configure {
   systemProperty 'tests.security.manager', 'false' // the main code under test runs without the SecurityManager
 }
 
-if (BuildParams.inFipsJvm) {
+if (buildParams.inFipsJvm) {
   tasks.named("test").configure {
     enabled = false
   }
diff --git a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part1/build.gradle b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part1/build.gradle
index f751fcd0a655d..f53ff7027f126 100644
--- a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part1/build.gradle
+++ b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part1/build.gradle
@@ -6,7 +6,7 @@
  */
 
 apply plugin: 'elasticsearch.build'
-apply plugin: 'com.github.johnrengelman.shadow'
+apply plugin: 'com.gradleup.shadow'
 
 // See the build.gradle file in the parent directory for an explanation of this unusual build
diff --git a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part2/build.gradle b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part2/build.gradle
index c4c0f2ebd2fe1..d24299a3847da 100644
--- a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part2/build.gradle
+++ b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part2/build.gradle
@@ -6,7 +6,7 @@
  */
 
 apply plugin: 'elasticsearch.build'
-apply plugin: 'com.github.johnrengelman.shadow'
+apply plugin: 'com.gradleup.shadow'
 
 // See the build.gradle file in the parent directory for an explanation of this unusual build
diff --git a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/build.gradle b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/build.gradle
index 580ca45055219..4418bd32e64cf 100644
--- a/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/build.gradle
+++ b/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/build.gradle
@@ -6,7 +6,7 @@
  */
 
 apply plugin: 'elasticsearch.build'
-apply plugin: 'com.github.johnrengelman.shadow'
+apply plugin: 'com.gradleup.shadow'
 
 // See the build.gradle file in the parent directory for an explanation of this unusual build
diff --git a/x-pack/plugin/security/qa/basic-enable-security/build.gradle b/x-pack/plugin/security/qa/basic-enable-security/build.gradle
index 5957216a3e12d..72deed1af72dd 100644
--- a/x-pack/plugin/security/qa/basic-enable-security/build.gradle
+++ b/x-pack/plugin/security/qa/basic-enable-security/build.gradle
@@ -5,8 +5,6 @@
  * 2.0.
  */
 
-import org.elasticsearch.gradle.internal.info.BuildParams
-
 apply plugin: 'elasticsearch.internal-java-rest-test'
 
 dependencies {
@@ -16,7 +14,7 @@ dependencies {
 
 tasks.named("javaRestTest").configure {
   // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC
-  BuildParams.withFipsEnabledOnly(it)
+  buildParams.withFipsEnabledOnly(it)
   usesDefaultDistribution()
 }
diff --git a/x-pack/plugin/security/qa/jwt-realm/build.gradle b/x-pack/plugin/security/qa/jwt-realm/build.gradle
index bc7178f11d9fc..1f7b7c1038fad 100644
--- a/x-pack/plugin/security/qa/jwt-realm/build.gradle
+++ b/x-pack/plugin/security/qa/jwt-realm/build.gradle
@@ -1,5 +1,11 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
 import org.elasticsearch.gradle.Version
-import org.elasticsearch.gradle.internal.info.BuildParams
 
 apply plugin: 'elasticsearch.internal-java-rest-test'
diff --git a/x-pack/plugin/security/qa/multi-cluster/build.gradle b/x-pack/plugin/security/qa/multi-cluster/build.gradle
index b8eccb14819a4..5b682cfdccade 100644
--- a/x-pack/plugin/security/qa/multi-cluster/build.gradle
+++ b/x-pack/plugin/security/qa/multi-cluster/build.gradle
@@ -5,7 +5,6 @@
  * 2.0.
  */
 
-import org.elasticsearch.gradle.internal.info.BuildParams
 import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask
 
 apply plugin: 'elasticsearch.internal-java-rest-test'
@@ -35,7 +34,7 @@ tasks.named("javaRestTest") {
   exclude '**/RemoteClusterSecurityBWCToRCS2ClusterRestIT.class'
 }
 
-BuildParams.bwcVersions.withWireCompatible() { bwcVersion, baseName ->
+buildParams.bwcVersions.withWireCompatible() { bwcVersion, baseName ->
   tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) {
     usesBwcDistribution(bwcVersion)
     systemProperty("tests.old_cluster_version", bwcVersion)
diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1MissingIndicesIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1MissingIndicesIT.java
new file mode 100644
index 0000000000000..8bccc2e3c5c23
--- /dev/null
+++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1MissingIndicesIT.java
@@ -0,0 +1,574 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.remotecluster;
+
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.ResponseException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.json.JsonXContent;
+import org.hamcrest.Matchers;
+import org.junit.ClassRule;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+
+/**
+ * Tests cross-cluster ES|QL queries under the RCS1.0 security model for cases where index expressions do not match,
+ * to ensure that the handling of those cases follows the rules defined in EsqlSessionCrossClusterUtils.
+ */
+public class CrossClusterEsqlRCS1MissingIndicesIT extends AbstractRemoteClusterSecurityTestCase {
+
+    private static final AtomicBoolean SSL_ENABLED_REF = new AtomicBoolean();
+
+    static {
+        // remote cluster
+        fulfillingCluster = ElasticsearchCluster.local()
+            .name("fulfilling-cluster")
+            .nodes(1)
+            .module("x-pack-esql")
+            .module("x-pack-enrich")
+            .apply(commonClusterConfig)
+            .setting("remote_cluster.port", "0")
+            .setting("xpack.ml.enabled", "false")
+            .setting("xpack.security.remote_cluster_server.ssl.enabled", () -> String.valueOf(SSL_ENABLED_REF.get()))
+            .setting("xpack.security.remote_cluster_server.ssl.key", "remote-cluster.key")
+            .setting("xpack.security.remote_cluster_server.ssl.certificate", "remote-cluster.crt")
+            .setting("xpack.security.authc.token.enabled", "true")
+            .keystore("xpack.security.remote_cluster_server.ssl.secure_key_passphrase", "remote-cluster-password")
+            .node(0, spec -> spec.setting("remote_cluster_server.enabled", "true"))
+            .build();
+
+        // "local" cluster
+        queryCluster = ElasticsearchCluster.local()
+            .name("query-cluster")
+            .module("x-pack-esql")
+            .module("x-pack-enrich")
+            .apply(commonClusterConfig)
+            .setting("xpack.ml.enabled", "false")
+            .setting("xpack.security.remote_cluster_client.ssl.enabled", () -> String.valueOf(SSL_ENABLED_REF.get()))
+            .build();
+    }
+
+    @ClassRule
+    public static TestRule clusterRule = RuleChain.outerRule(fulfillingCluster).around(queryCluster);
+
+    private static final String INDEX1 = "points"; // on local cluster only
+    private static final String INDEX2 = "squares"; // on local and remote clusters
+
+    record ExpectedCluster(String clusterAlias, String indexExpression, String status, Integer totalShards) {}
+
+    @SuppressWarnings("unchecked")
+    void assertExpectedClustersForMissingIndicesTests(Map<String, Object> responseMap, List<ExpectedCluster> expected) {
+        Map clusters = (Map) responseMap.get("_clusters");
+        assertThat((int) responseMap.get("took"), greaterThan(0));
+
+        Map detailsMap = (Map) clusters.get("details");
+        assertThat(detailsMap.size(), is(expected.size()));
+
+        assertThat((int) clusters.get("total"), is(expected.size()));
+        assertThat((int) clusters.get("successful"), is((int) expected.stream().filter(ec -> ec.status().equals("successful")).count()));
+        assertThat((int) clusters.get("skipped"), is((int) expected.stream().filter(ec -> ec.status().equals("skipped")).count()));
+        assertThat((int) clusters.get("failed"), is((int) expected.stream().filter(ec -> ec.status().equals("failed")).count()));
+
+        for (ExpectedCluster expectedCluster : expected) {
+            Map clusterDetails = (Map) detailsMap.get(expectedCluster.clusterAlias());
+            String msg = expectedCluster.clusterAlias();
+
+            assertThat(msg, (int) clusterDetails.get("took"), greaterThan(0));
+            assertThat(msg, clusterDetails.get("status"), is(expectedCluster.status()));
+            Map shards = (Map) clusterDetails.get("_shards");
+            if (expectedCluster.totalShards() == null) {
+                assertThat(msg, (int) shards.get("total"), greaterThan(0));
+            } else {
+                assertThat(msg, (int) shards.get("total"), is(expectedCluster.totalShards()));
+            }
+
+            if (expectedCluster.status().equals("successful")) {
+                assertThat((int) shards.get("successful"), is((int) shards.get("total")));
+                assertThat((int) shards.get("skipped"), is(0));
+
+            } else if (expectedCluster.status().equals("skipped")) {
+                assertThat((int) shards.get("successful"), is(0));
+                assertThat((int) shards.get("skipped"), is((int) shards.get("total")));
+                ArrayList failures = (ArrayList) clusterDetails.get("failures");
+                assertThat(failures.size(), is(1));
+                Map failure1 = (Map) failures.get(0);
+                Map innerReason = (Map) failure1.get("reason");
+                String expectedMsg = "Unknown index [" + expectedCluster.indexExpression() + "]";
+                assertThat(innerReason.get("reason").toString(), containsString(expectedMsg));
+                assertThat(innerReason.get("type").toString(), containsString("verification_exception"));
+
+            } else {
+                fail(msg + "; Unexpected status: " + expectedCluster.status());
+            }
+            // currently the failed shard count is always zero - change this once we start allowing partial data for individual shard failures
+            assertThat((int) shards.get("failed"), is(0));
+        }
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testSearchesAgainstNonMatchingIndicesWithSkipUnavailableTrue() throws Exception {
+        setupRolesAndPrivileges();
+        setupIndex();
+
+        configureRemoteCluster(REMOTE_CLUSTER_ALIAS, fulfillingCluster, true, randomBoolean(), true);
+
+        // missing concrete local index is an error
+        {
+            String q = Strings.format("FROM nomatch,%s:%s | STATS count(*)", REMOTE_CLUSTER_ALIAS, INDEX2);
+
+            String limit1 = q + " | LIMIT 1";
+            ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1)));
+            assertThat(e.getMessage(), containsString("Unknown index [nomatch]"));
+
+            String limit0 = q + " | LIMIT 0";
+            e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0)));
+            assertThat(e.getMessage(), Matchers.containsString("Unknown index [nomatch]"));
+        }
+
+        // missing concrete remote index is not fatal when skip_unavailable=true (as long as an index matches on another cluster)
+        {
+            String q = Strings.format("FROM %s,%s:nomatch | STATS count(*)", INDEX1, REMOTE_CLUSTER_ALIAS);
+
+            String limit1 = q + " | LIMIT 1";
+            Response response = client().performRequest(esqlRequest(limit1));
+            assertOK(response);
+
+            Map map = responseAsMap(response);
+            assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1));
+            assertThat(((ArrayList) map.get("values")).size(), greaterThanOrEqualTo(1));
+
+            assertExpectedClustersForMissingIndicesTests(
+                map,
+                List.of(
+                    new ExpectedCluster("(local)", INDEX1, "successful", null),
+                    new ExpectedCluster(REMOTE_CLUSTER_ALIAS, "nomatch", "skipped", 0)
+                )
+            );
+
+            String limit0 = q + " | LIMIT 0";
+            response = client().performRequest(esqlRequest(limit0));
+            assertOK(response);
+
+            map = responseAsMap(response);
+            assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1));
+            assertThat(((ArrayList) map.get("values")).size(), is(0));
+
+            assertExpectedClustersForMissingIndicesTests(
+                map,
+                List.of(
+                    new ExpectedCluster("(local)", INDEX1, "successful", 0),
+                    new ExpectedCluster(REMOTE_CLUSTER_ALIAS, "nomatch", "skipped", 0)
+                )
+            );
+        }
+
+        // since there is at least one matching index in the query, the missing wildcarded local index is not an error
+        {
+            String q = Strings.format("FROM nomatch*,%s:%s", REMOTE_CLUSTER_ALIAS, INDEX2);
+
+            String limit1 = q + " | LIMIT 1";
+            Response response = client().performRequest(esqlRequest(limit1));
+            assertOK(response);
+
+            Map map = responseAsMap(response);
+            assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1));
+            assertThat(((ArrayList) map.get("values")).size(), greaterThanOrEqualTo(1));
+
+            assertExpectedClustersForMissingIndicesTests(
+                map,
+                List.of(
+                    // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched
+                    new ExpectedCluster("(local)", "nomatch*", "successful", 0),
+                    new ExpectedCluster(REMOTE_CLUSTER_ALIAS, INDEX2, "successful", null)
+                )
+            );
+
+            String limit0 = q + " | LIMIT 0";
+            response = client().performRequest(esqlRequest(limit0));
+            assertOK(response);
+
+            map = responseAsMap(response);
+            assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1));
+            assertThat(((ArrayList) map.get("values")).size(), is(0));
+
+            assertExpectedClustersForMissingIndicesTests(
+                map,
+                List.of(
+                    // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched
+                    new ExpectedCluster("(local)", "nomatch*", "successful", 0),
+                    new ExpectedCluster(REMOTE_CLUSTER_ALIAS, INDEX2, "successful", 0)
+                )
+            );
+        }
+
+        // since at least one index of the query matches on some cluster, a missing wildcarded index on skip_unavailable=true is not an error
+        {
+            String q = Strings.format("FROM %s,%s:nomatch*", INDEX1, REMOTE_CLUSTER_ALIAS);
+
+            String limit1 = q + " | LIMIT 1";
+            Response response = client().performRequest(esqlRequest(limit1));
+            assertOK(response);
+
+            Map map = responseAsMap(response);
+            assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1));
+            assertThat(((ArrayList) map.get("values")).size(), greaterThanOrEqualTo(1));
+
+            assertExpectedClustersForMissingIndicesTests(
+                map,
+                List.of(
+                    new ExpectedCluster("(local)", INDEX1, "successful", null),
+                    new ExpectedCluster(REMOTE_CLUSTER_ALIAS, "nomatch*", "skipped", 0)
+                )
+            );
+
+            String limit0 = q + " | LIMIT 0";
+            response = client().performRequest(esqlRequest(limit0));
+            assertOK(response);
+
+            map = responseAsMap(response);
+            assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1));
+            assertThat(((ArrayList) map.get("values")).size(), is(0));
+
+            assertExpectedClustersForMissingIndicesTests(
+                map,
+                List.of(
+                    new ExpectedCluster("(local)", INDEX1, "successful", 0),
+                    new ExpectedCluster(REMOTE_CLUSTER_ALIAS, "nomatch*", "skipped", 0)
+                )
+            );
+        }
+
+        // an error is thrown if there are no matching indices at all, even when the cluster is skip_unavailable=true
+        {
+            // with non-matching concrete index
+            String q = Strings.format("FROM %s:nomatch", REMOTE_CLUSTER_ALIAS);
+
+            String limit1 = q + " | LIMIT 1";
+            ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1)));
+            assertThat(e.getMessage(), containsString(Strings.format("Unknown index [%s:nomatch]", REMOTE_CLUSTER_ALIAS)));
+
+            String limit0 = q + " | LIMIT 0";
+            e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0)));
+            assertThat(e.getMessage(), containsString(Strings.format("Unknown index [%s:nomatch]", REMOTE_CLUSTER_ALIAS)));
+        }
+
+        // an error is thrown if there are no matching indices at all, even when the cluster is skip_unavailable=true and the
+        // index was wildcarded
+        {
+            String q = Strings.format("FROM %s:nomatch*", REMOTE_CLUSTER_ALIAS);
+
+            String limit1 = q + " | LIMIT 1";
+            ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1)));
+            assertThat(e.getMessage(), containsString(Strings.format("Unknown index [%s:nomatch*]", REMOTE_CLUSTER_ALIAS)));
+
+            String limit0 = q + " | LIMIT 0";
+            e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0)));
+            assertThat(e.getMessage(), containsString(Strings.format("Unknown index [%s:nomatch*]", REMOTE_CLUSTER_ALIAS)));
+        }
+
+        // an error is thrown if there are no matching indices at all
+        {
+            String localExpr = randomFrom("nomatch", "nomatch*");
randomFrom("nomatch", "nomatch*"); + String remoteExpr = randomFrom("nomatch", "nomatch*"); + String q = Strings.format("FROM %s,%s:%s", localExpr, REMOTE_CLUSTER_ALIAS, remoteExpr); + + String limit1 = q + " | LIMIT 1"; + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1))); + assertThat(e.getMessage(), containsString("Unknown index")); + assertThat(e.getMessage(), containsString(Strings.format("%s:%s", REMOTE_CLUSTER_ALIAS, remoteExpr))); + + String limit0 = q + " | LIMIT 0"; + e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0))); + assertThat(e.getMessage(), containsString("Unknown index")); + assertThat(e.getMessage(), containsString(Strings.format("%s:%s", REMOTE_CLUSTER_ALIAS, remoteExpr))); + } + + // TODO uncomment and test in follow-on PR which does skip_unavailable handling at execution time + // { + // String q = Strings.format("FROM %s,%s:nomatch,%s:%s*", INDEX1, REMOTE_CLUSTER_ALIAS, REMOTE_CLUSTER_ALIAS, INDEX2); + // + // String limit1 = q + " | LIMIT 1"; + // Response response = client().performRequest(esqlRequest(limit1)); + // assertOK(response); + // + // Map map = responseAsMap(response); + // assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1)); + // assertThat(((ArrayList) map.get("values")).size(), greaterThanOrEqualTo(1)); + // + // assertExpectedClustersForMissingIndicesTests(map, + // List.of( + // new ExpectedCluster("(local)", INDEX1, "successful", null), + // new ExpectedCluster(REMOTE_CLUSTER_ALIAS, "nomatch," + INDEX2 + "*", "skipped", 0) + // ) + // ); + // + // String limit0 = q + " | LIMIT 0"; + // response = client().performRequest(esqlRequest(limit0)); + // assertOK(response); + // + // map = responseAsMap(response); + // assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1)); + // assertThat(((ArrayList) map.get("values")).size(), is(0)); + // + // assertExpectedClustersForMissingIndicesTests(map, + // List.of( + // new ExpectedCluster("(local)", INDEX1, "successful", 0), + // new ExpectedCluster(REMOTE_CLUSTER_ALIAS, "nomatch," + INDEX2 + "*", "skipped", 0) + // ) + // ); + // } + } + + @SuppressWarnings("unchecked") + public void testSearchesAgainstNonMatchingIndicesWithSkipUnavailableFalse() throws Exception { + // Remote cluster is closed and skip_unavailable is set to false. + // Although the other cluster is open, we expect an Exception. 
+
+        setupRolesAndPrivileges();
+        setupIndex();
+
+        configureRemoteCluster(REMOTE_CLUSTER_ALIAS, fulfillingCluster, true, randomBoolean(), false);
+
+        // missing concrete local index is an error
+        {
+            String q = Strings.format("FROM nomatch,%s:%s | STATS count(*)", REMOTE_CLUSTER_ALIAS, INDEX2);
+
+            String limit1 = q + " | LIMIT 1";
+            ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1)));
+            assertThat(e.getMessage(), containsString("Unknown index [nomatch]"));
+
+            String limit0 = q + " | LIMIT 0";
+            e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0)));
+            assertThat(e.getMessage(), containsString("Unknown index [nomatch]"));
+        }
+
+        // missing concrete remote index is fatal when skip_unavailable=false, even though an index matches on the local cluster
+        {
+            String q = Strings.format("FROM %s,%s:nomatch | STATS count(*)", INDEX1, REMOTE_CLUSTER_ALIAS);
+
+            String limit1 = q + " | LIMIT 1";
+            ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1)));
+            assertThat(e.getMessage(), containsString(Strings.format("Unknown index [%s:nomatch]", REMOTE_CLUSTER_ALIAS)));
+
+            String limit0 = q + " | LIMIT 0";
+            e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0)));
+            assertThat(e.getMessage(), containsString(Strings.format("Unknown index [%s:nomatch]", REMOTE_CLUSTER_ALIAS)));
+        }
+
+        // since there is at least one matching index in the query, the missing wildcarded local index is not an error
+        {
+            String q = Strings.format("FROM nomatch*,%s:%s", REMOTE_CLUSTER_ALIAS, INDEX2);
+
+            String limit1 = q + " | LIMIT 1";
+            Response response = client().performRequest(esqlRequest(limit1));
+            assertOK(response);
+
+            Map map = responseAsMap(response);
+            assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1));
+            assertThat(((ArrayList) map.get("values")).size(), greaterThanOrEqualTo(1));
+
+            assertExpectedClustersForMissingIndicesTests(
+                map,
+                List.of(
+                    // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched
+                    new ExpectedCluster("(local)", "nomatch*", "successful", 0),
+                    new ExpectedCluster(REMOTE_CLUSTER_ALIAS, INDEX2, "successful", null)
+                )
+            );
+
+            String limit0 = q + " | LIMIT 0";
+            response = client().performRequest(esqlRequest(limit0));
+            assertOK(response);
+
+            map = responseAsMap(response);
+            assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1));
+            assertThat(((ArrayList) map.get("values")).size(), is(0));
+
+            assertExpectedClustersForMissingIndicesTests(
+                map,
+                List.of(
+                    // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched
+                    new ExpectedCluster("(local)", "nomatch*", "successful", 0),
+                    new ExpectedCluster(REMOTE_CLUSTER_ALIAS, INDEX2, "successful", 0)
+                )
+            );
+        }
+
+        // query is fatal since the remote cluster has skip_unavailable=false and has no matching indices
+        {
+            String q = Strings.format("FROM %s,%s:nomatch*", INDEX1, REMOTE_CLUSTER_ALIAS);
+
+            String limit1 = q + " | LIMIT 1";
+            ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1)));
+            assertThat(e.getMessage(), containsString(Strings.format("Unknown index [%s:nomatch*]", REMOTE_CLUSTER_ALIAS)));
+
+            String limit0 = q + " | LIMIT 0";
+            e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0)));
+            assertThat(e.getMessage(), Matchers.containsString(Strings.format("Unknown index [%s:nomatch*]", REMOTE_CLUSTER_ALIAS)));
+        }
+
+        // an error is thrown if there are no matching indices at all
+        {
+            // with non-matching concrete index
+            String q = Strings.format("FROM %s:nomatch", REMOTE_CLUSTER_ALIAS);
+
+            String limit1 = q + " | LIMIT 1";
+            ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1)));
+            assertThat(e.getMessage(), containsString(Strings.format("Unknown index [%s:nomatch]", REMOTE_CLUSTER_ALIAS)));
+
+            String limit0 = q + " | LIMIT 0";
+            e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0)));
+            assertThat(e.getMessage(), containsString(Strings.format("Unknown index [%s:nomatch]", REMOTE_CLUSTER_ALIAS)));
+        }
+
+        // an error is thrown if there are no matching indices at all
+        {
+            String localExpr = randomFrom("nomatch", "nomatch*");
+            String remoteExpr = randomFrom("nomatch", "nomatch*");
+            String q = Strings.format("FROM %s,%s:%s", localExpr, REMOTE_CLUSTER_ALIAS, remoteExpr);
+
+            String limit1 = q + " | LIMIT 1";
+            ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1)));
+            assertThat(e.getMessage(), containsString("Unknown index"));
+            assertThat(e.getMessage(), containsString(Strings.format("%s:%s", REMOTE_CLUSTER_ALIAS, remoteExpr)));
+
+            String limit0 = q + " | LIMIT 0";
+            e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0)));
+            assertThat(e.getMessage(), containsString("Unknown index"));
+            assertThat(e.getMessage(), containsString(Strings.format("%s:%s", REMOTE_CLUSTER_ALIAS, remoteExpr)));
+        }
+
+        // error since the remote cluster with skip_unavailable=false specified a concrete index that is not found
+        {
+            String q = Strings.format("FROM %s,%s:nomatch,%s:%s*", INDEX1, REMOTE_CLUSTER_ALIAS, REMOTE_CLUSTER_ALIAS, INDEX2);
+
+            String limit1 = q + " | LIMIT 1";
+            ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit1)));
+            assertThat(e.getMessage(), containsString("no such index [nomatch]"));
+            assertThat(e.getMessage(), containsString("index_not_found_exception"));
+
+            // TODO: in follow on PR, add support for throwing a VerificationException from this scenario
+            // String limit0 = q + " | LIMIT 0";
+            // e = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(limit0)));
+            // assertThat(e.getMessage(), containsString(Strings.format("Unknown index [%s:nomatch]", REMOTE_CLUSTER_ALIAS)));
+        }
+    }
+
+    private void setupRolesAndPrivileges() throws IOException {
+        var putUserRequest = new Request("PUT", "/_security/user/" + REMOTE_SEARCH_USER);
+        putUserRequest.setJsonEntity("""
+            {
+              "password": "x-pack-test-password",
+              "roles" : ["remote_search"]
+            }""");
+        assertOK(adminClient().performRequest(putUserRequest));
+
+        var putRoleOnRemoteClusterRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE);
+        putRoleOnRemoteClusterRequest.setJsonEntity("""
+            {
+              "indices": [
+                {
+                  "names": ["points", "squares"],
+                  "privileges": ["read", "read_cross_cluster", "create_index", "monitor"]
+                }
+              ],
+              "remote_indices": [
+                {
+                  "names": ["points", "squares"],
+                  "privileges": ["read", "read_cross_cluster", "create_index", "monitor"],
+                  "clusters": ["my_remote_cluster"]
+                }
+              ]
+            }""");
+        assertOK(adminClient().performRequest(putRoleOnRemoteClusterRequest));
+    }
+
+    private void setupIndex() throws IOException {
+        Request createIndex = new Request("PUT", INDEX1);
+        createIndex.setJsonEntity("""
+            {
+              "mappings": {
+                "properties": {
+                  "id": { "type": "integer" },
+                  "score": { "type": "integer" }
+                }
+              }
+            }
+            """);
+        assertOK(client().performRequest(createIndex));
+
+        Request bulkRequest = new Request("POST", "/_bulk?refresh=true");
+        bulkRequest.setJsonEntity("""
+            { "index": { "_index": "points" } }
+            { "id": 1, "score": 75}
+            { "index": { "_index": "points" } }
+            { "id": 2, "score": 125}
+            { "index": { "_index": "points" } }
+            { "id": 3, "score": 100}
+            { "index": { "_index": "points" } }
+            { "id": 4, "score": 50}
+            { "index": { "_index": "points" } }
+            { "id": 5, "score": 150}
+            """);
+        assertOK(client().performRequest(bulkRequest));
+
+        createIndex = new Request("PUT", INDEX2);
+        createIndex.setJsonEntity("""
+            {
+              "mappings": {
+                "properties": {
+                  "num": { "type": "integer" },
+                  "square": { "type": "integer" }
+                }
+              }
+            }
+            """);
+        assertOK(client().performRequest(createIndex));
+
+        bulkRequest = new Request("POST", "/_bulk?refresh=true");
+        bulkRequest.setJsonEntity("""
+            { "index": {"_index": "squares"}}
+            { "num": 1, "square": 1 }
+            { "index": {"_index": "squares"}}
+            { "num": 2, "square": 4 }
+            { "index": {"_index": "squares"}}
+            { "num": 3, "square": 9 }
+            { "index": {"_index": "squares"}}
+            { "num": 4, "square": 16 }
+            """);
+        assertOK(performRequestAgainstFulfillingCluster(bulkRequest));
+    }
+
+    private Request esqlRequest(String query) throws IOException {
+        XContentBuilder body = JsonXContent.contentBuilder();
+
+        body.startObject();
+        body.field("query", query);
+        body.field("include_ccs_metadata", true);
+        body.endObject();
+
+        Request request = new Request("POST", "_query");
+        request.setJsonEntity(org.elasticsearch.common.Strings.toString(body));
+
+        return request;
+    }
+}
diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS2UnavailableRemotesIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS2UnavailableRemotesIT.java
new file mode 100644
index 0000000000000..52cd0655fbfdf
--- /dev/null
+++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS2UnavailableRemotesIT.java
@@ -0,0 +1,316 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.remotecluster;
+
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.ResponseException;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.cluster.util.resource.Resource;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.json.JsonXContent;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.is;
+
+public class CrossClusterEsqlRCS2UnavailableRemotesIT extends AbstractRemoteClusterSecurityTestCase {
+    private static final AtomicReference<Map<String, Object>> API_KEY_MAP_REF = new AtomicReference<>();
+
+    static {
+        fulfillingCluster = ElasticsearchCluster.local()
+            .name("fulfilling-cluster")
+            .nodes(1)
+            .module("x-pack-esql")
+            .apply(commonClusterConfig)
+            .setting("remote_cluster.port", "0")
+            .setting("xpack.ml.enabled", "false")
+            .setting("xpack.security.remote_cluster_server.ssl.enabled", "true")
+            .setting("xpack.security.remote_cluster_server.ssl.key", "remote-cluster.key")
+            .setting("xpack.security.remote_cluster_server.ssl.certificate", "remote-cluster.crt")
+            .setting("xpack.security.authc.token.enabled", "true")
+            .keystore("xpack.security.remote_cluster_server.ssl.secure_key_passphrase", "remote-cluster-password")
+            .node(0, spec -> spec.setting("remote_cluster_server.enabled", "true"))
+            .build();
+
+        queryCluster = ElasticsearchCluster.local()
+            .name("query-cluster")
+            .module("x-pack-esql")
+            .apply(commonClusterConfig)
+            .setting("xpack.ml.enabled", "false")
+            .setting("xpack.security.remote_cluster_client.ssl.enabled", "true")
+            .setting("xpack.security.remote_cluster_client.ssl.certificate_authorities", "remote-cluster-ca.crt")
+            .setting("xpack.security.authc.token.enabled", "true")
+            .keystore("cluster.remote.my_remote_cluster.credentials", () -> {
+                if (API_KEY_MAP_REF.get() == null) {
+                    final Map<String, Object> apiKeyMap = createCrossClusterAccessApiKey("""
+                        {
+                          "search": [
+                            {
+                              "names": ["*"]
+                            }
+                          ]
+                        }""");
+                    API_KEY_MAP_REF.set(apiKeyMap);
+                }
+                return (String) API_KEY_MAP_REF.get().get("encoded");
+            })
+            .rolesFile(Resource.fromClasspath("roles.yml"))
+            .user(REMOTE_METRIC_USER, PASS.toString(), "read_remote_shared_metrics", false)
+            .build();
+    }
+
+    @ClassRule
+    public static TestRule clusterRule = RuleChain.outerRule(fulfillingCluster).around(queryCluster);
+
+    @Before
+    public void setupPreRequisites() throws Exception {
+        setupRolesAndPrivileges();
+        loadData();
+    }
+
+    public void testEsqlRcs2UnavailableRemoteScenarios() throws Exception {
+        clusterShutDownWithRandomSkipUnavailable();
+        remoteClusterShutdownWithSkipUnavailableTrue();
+        remoteClusterShutdownWithSkipUnavailableFalse();
+    }
+
+    private void clusterShutDownWithRandomSkipUnavailable() throws Exception {
+        // skip_unavailable is set to a random boolean value.
+        // However, no clusters are stopped. Hence, we expect no behaviour
+        // other than a 200-OK.
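For readers skimming the cluster setup above, the sketch below restates the two-cluster RCS2 wiring as plain data: the fulfilling cluster serves the TLS remote-cluster port, and the query cluster authenticates with the encoded cross-cluster API key kept in its keystore. Setting names are copied from the test; the placeholder key value and class name are hypothetical.

```java
import java.util.Map;

public class Rcs2WiringSketch {
    public static void main(String[] args) {
        // Fulfilling (remote) side: expose the dedicated remote-cluster server over TLS.
        Map<String, String> fulfilling = Map.of(
            "remote_cluster_server.enabled", "true",
            "remote_cluster.port", "0", // ephemeral port, as in the test
            "xpack.security.remote_cluster_server.ssl.enabled", "true"
        );
        // Query side: trust the remote CA and present the encoded cross-cluster API key.
        Map<String, String> query = Map.of(
            "xpack.security.remote_cluster_client.ssl.enabled", "true",
            // secure (keystore) setting; value is created on the fulfilling cluster
            "cluster.remote.my_remote_cluster.credentials", "<encoded-api-key>"
        );
        fulfilling.forEach((k, v) -> System.out.println("fulfilling: " + k + "=" + v));
        query.forEach((k, v) -> System.out.println("query:      " + k + "=" + v));
    }
}
```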
+ + configureRemoteCluster("my_remote_cluster", fulfillingCluster, false, randomBoolean(), randomBoolean()); + String query = "FROM *,my_remote_cluster:* | LIMIT 10"; + Response response = performRequestWithRemoteSearchUser(esqlRequest(query)); + + Map map = responseAsMap(response); + ArrayList columns = (ArrayList) map.get("columns"); + ArrayList values = (ArrayList) map.get("values"); + Map clusters = (Map) map.get("_clusters"); + Map clusterDetails = (Map) clusters.get("details"); + Map localClusterDetails = (Map) clusterDetails.get("(local)"); + Map remoteClusterDetails = (Map) clusterDetails.get("my_remote_cluster"); + + assertOK(response); + assertThat((int) map.get("took"), greaterThan(0)); + assertThat(columns.size(), is(4)); + assertThat(values.size(), is(9)); + + assertThat((int) clusters.get("total"), is(2)); + assertThat((int) clusters.get("successful"), is(2)); + assertThat((int) clusters.get("running"), is(0)); + assertThat((int) clusters.get("skipped"), is(0)); + assertThat((int) clusters.get("partial"), is(0)); + assertThat((int) clusters.get("failed"), is(0)); + + assertThat(clusterDetails.size(), is(2)); + assertThat((int) localClusterDetails.get("took"), greaterThan(0)); + assertThat(localClusterDetails.get("status"), is("successful")); + + assertThat((int) remoteClusterDetails.get("took"), greaterThan(0)); + assertThat(remoteClusterDetails.get("status"), is("successful")); + } + + @SuppressWarnings("unchecked") + private void remoteClusterShutdownWithSkipUnavailableTrue() throws Exception { + // Remote cluster is stopped and skip unavailable is set to true. + // We expect no exception and partial results from the remaining open cluster. + + configureRemoteCluster("my_remote_cluster", fulfillingCluster, false, randomBoolean(), true); + + try { + // Stop remote cluster. + fulfillingCluster.stop(true); + + // A simple query that targets our remote cluster. + String query = "FROM *,my_remote_cluster:* | LIMIT 10"; + Response response = performRequestWithRemoteSearchUser(esqlRequest(query)); + + Map map = responseAsMap(response); + ArrayList columns = (ArrayList) map.get("columns"); + ArrayList values = (ArrayList) map.get("values"); + Map clusters = (Map) map.get("_clusters"); + Map clusterDetails = (Map) clusters.get("details"); + Map localClusterDetails = (Map) clusterDetails.get("(local)"); + Map remoteClusterDetails = (Map) clusterDetails.get("my_remote_cluster"); + + // Assert results obtained from the local cluster and that remote cluster was + // skipped. 
+ assertOK(response); + assertThat((int) map.get("took"), greaterThan(0)); + + assertThat(columns.size(), is(2)); + assertThat(values.size(), is(5)); + + assertThat((int) clusters.get("total"), is(2)); + assertThat((int) clusters.get("successful"), is(1)); + assertThat((int) clusters.get("skipped"), is(1)); + assertThat((int) clusters.get("running"), is(0)); + assertThat((int) clusters.get("partial"), is(0)); + assertThat((int) clusters.get("failed"), is(0)); + + assertThat(clusterDetails.size(), is(2)); + assertThat((int) localClusterDetails.get("took"), greaterThan(0)); + assertThat(localClusterDetails.get("status"), is("successful")); + + assertThat((int) remoteClusterDetails.get("took"), greaterThan(0)); + assertThat(remoteClusterDetails.get("status"), is("skipped")); + + ArrayList remoteClusterFailures = (ArrayList) remoteClusterDetails.get("failures"); + assertThat(remoteClusterFailures.size(), equalTo(1)); + Map failuresMap = (Map) remoteClusterFailures.get(0); + + Map reason = (Map) failuresMap.get("reason"); + assertThat(reason.get("type").toString(), equalTo("connect_transport_exception")); + assertThat(reason.get("reason").toString(), containsString("Unable to connect to [my_remote_cluster]")); + } finally { + fulfillingCluster.start(); + closeFulfillingClusterClient(); + initFulfillingClusterClient(); + } + } + + private void remoteClusterShutdownWithSkipUnavailableFalse() throws Exception { + // Remote cluster is stopped and skip_unavailable is set to false. + // Although the other cluster is open, we expect an Exception. + + configureRemoteCluster("my_remote_cluster", fulfillingCluster, false, randomBoolean(), false); + + try { + // Stop remote cluster. + fulfillingCluster.stop(true); + + // A simple query that targets our remote cluster. 
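The try/finally shape in these scenarios is the load-bearing part: the fulfilling cluster must be restarted even when assertions fail, or every later scenario inherits a dead remote. A generic sketch of that discipline (the `Cluster` interface here is a stand-in, not the test framework's `ElasticsearchCluster`):

```java
public class StopQueryRestart {
    interface Cluster { void stop(); void start(); }

    static void withClusterStopped(Cluster cluster, Runnable assertions) {
        cluster.stop();
        try {
            assertions.run();  // query while the remote is down, assert skipped/failed
        } finally {
            cluster.start();   // always restore the topology for the next scenario
        }
    }

    public static void main(String[] args) {
        Cluster fake = new Cluster() {
            public void stop() { System.out.println("stop remote"); }
            public void start() { System.out.println("start remote"); }
        };
        withClusterStopped(fake, () -> System.out.println("run assertions"));
    }
}
```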
+ String query = "FROM *,my_remote_cluster:* | LIMIT 10"; + ResponseException ex = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(esqlRequest(query))); + assertThat(ex.getMessage(), containsString("connect_transport_exception")); + } finally { + fulfillingCluster.start(); + closeFulfillingClusterClient(); + initFulfillingClusterClient(); + } + } + + private void setupRolesAndPrivileges() throws IOException { + var putUserRequest = new Request("PUT", "/_security/user/" + REMOTE_SEARCH_USER); + putUserRequest.setJsonEntity(""" + { + "password": "x-pack-test-password", + "roles" : ["remote_search"] + }"""); + assertOK(adminClient().performRequest(putUserRequest)); + + var putRoleOnRemoteClusterRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); + putRoleOnRemoteClusterRequest.setJsonEntity(""" + { + "indices": [ + { + "names": ["task", "hits"], + "privileges": ["read", "read_cross_cluster", "create_index", "monitor"] + } + ], + "remote_indices": [ + { + "names": ["task", "hits"], + "privileges": ["read", "read_cross_cluster", "create_index", "monitor"], + "clusters": ["*"] + } + ] + }"""); + assertOK(adminClient().performRequest(putRoleOnRemoteClusterRequest)); + } + + private void loadData() throws IOException { + Request createIndex = new Request("PUT", "task"); + createIndex.setJsonEntity(""" + { + "mappings": { + "properties": { + "id": { "type": "integer" }, + "time_taken_millis": { "type": "integer" } + } + } + } + """); + assertOK(client().performRequest(createIndex)); + + Request bulkRequest = new Request("POST", "/_bulk?refresh=true"); + bulkRequest.setJsonEntity(""" + { "index": { "_index": "task" } } + { "id": 1, "time_taken_millis": 39} + { "index": { "_index": "task" } } + { "id": 2, "time_taken_millis": 25} + { "index": { "_index": "task" } } + { "id": 3, "time_taken_millis": 42} + { "index": { "_index": "task" } } + { "id": 4, "time_taken_millis": 16} + { "index": { "_index": "task" } } + { "id": 5, "time_taken_millis": 62} + """); + assertOK(client().performRequest(bulkRequest)); + + createIndex = new Request("PUT", "hits"); + createIndex.setJsonEntity(""" + { + "mappings": { + "properties": { + "endpoint_id": { "type": "integer" }, + "t_hits": { "type": "integer" } + } + } + } + """); + assertOK(performRequestAgainstFulfillingCluster(createIndex)); + + bulkRequest = new Request("POST", "/_bulk?refresh=true"); + bulkRequest.setJsonEntity(""" + { "index": {"_index": "hits"}} + { "endpoint_id": 1, "t_hits": 1267 } + { "index": {"_index": "hits"}} + { "endpoint_id": 2, "t_hits": 1389 } + { "index": {"_index": "hits"}} + { "endpoint_id": 3, "t_hits": 1922 } + { "index": {"_index": "hits"}} + { "endpoint_id": 4, "t_hits": 1547 } + """); + assertOK(performRequestAgainstFulfillingCluster(bulkRequest)); + } + + private Response performRequestWithRemoteSearchUser(final Request request) throws IOException { + request.setOptions( + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", headerFromRandomAuthMethod(REMOTE_SEARCH_USER, PASS)) + ); + return client().performRequest(request); + } + + private Request esqlRequest(String query) throws IOException { + XContentBuilder body = JsonXContent.contentBuilder(); + + body.startObject(); + body.field("query", query); + body.field("include_ccs_metadata", true); + body.endObject(); + + Request request = new Request("POST", "_query"); + request.setJsonEntity(org.elasticsearch.common.Strings.toString(body)); + + return request; + } +} diff --git 
a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java index d5b3141b539eb..09449f81121fd 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; @@ -31,6 +32,7 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; import java.util.Base64; import java.util.List; import java.util.Map; @@ -43,9 +45,12 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class RemoteClusterSecurityEsqlIT extends AbstractRemoteClusterSecurityTestCase { private static final AtomicReference> API_KEY_MAP_REF = new AtomicReference<>(); @@ -347,7 +352,7 @@ public void testCrossClusterQuery() throws Exception { assertRemoteOnlyResults(response); // same as above but authenticate with API key - response = performRequestWithRemoteSearchUserViaAPIKey(request); + response = performRequestWithRemoteSearchUserViaAPIKey(request, createRemoteSearchUserAPIKey()); assertRemoteOnlyResults(response); // query remote and local cluster @@ -495,7 +500,7 @@ public void testCrossClusterQueryWithRemoteDLSAndFLS() throws Exception { } /** - * Note: invalid_remote is "invalid" because it has a bogus API key and the cluster does not exist (cannot be connected to) + * Note: invalid_remote is "invalid" because it has a bogus API key */ @SuppressWarnings("unchecked") public void testCrossClusterQueryAgainstInvalidRemote() throws Exception { @@ -521,13 +526,19 @@ public void testCrossClusterQueryAgainstInvalidRemote() throws Exception { // invalid remote with local index should return local results { var q = "FROM invalid_remote:employees,employees | SORT emp_id DESC | LIMIT 10"; - Response response = performRequestWithRemoteSearchUser(esqlRequest(q)); - // TODO: when skip_unavailable=false for invalid_remote, a fatal exception should be thrown - // this does not yet happen because field-caps returns nothing for this cluster, rather - // than an error, so the current code cannot detect that error. Follow on PR will handle this. - assertLocalOnlyResults(response); + if (skipUnavailable) { + Response response = performRequestWithRemoteSearchUser(esqlRequest(q)); + // this does not yet happen because field-caps returns nothing for this cluster, rather + // than an error, so the current code cannot detect that error. Follow on PR will handle this. 
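The branch below encodes the skip_unavailable contract this PR tightens for ES|QL. A toy decision table of the two outcomes (the strings paraphrase the test's expectations and are not library output):

```java
public class SkipUnavailableSemantics {
    static String outcome(boolean skipUnavailable) {
        return skipUnavailable
            ? "200 OK, remote marked skipped, local results only"
            : "4xx error (fatal for the whole query)";
    }

    public static void main(String[] args) {
        System.out.println("skip_unavailable=true  -> " + outcome(true));
        System.out.println("skip_unavailable=false -> " + outcome(false));
    }
}
```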
+                assertLocalOnlyResultsAndSkippedRemote(response);
+            } else {
+                // errors from invalid remote should throw an exception if the cluster is marked with skip_unavailable=false
+                ResponseException error = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(esqlRequest(q)));
+                assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(400));
+                // TODO: in follow on PR, figure out why this is returning the wrong error - should be "cannot connect to invalid_remote"
+                assertThat(error.getMessage(), containsString("Unknown index [invalid_remote:employees]"));
+            }
         }
-
         {
             var q = "FROM invalid_remote:employees | SORT emp_id DESC | LIMIT 10";
             // errors from invalid remote should be ignored if the cluster is marked with skip_unavailable=true
@@ -560,10 +571,9 @@
             } else {
                 // errors from invalid remote should throw an exception if the cluster is marked with skip_unavailable=false
-                ResponseException error = expectThrows(ResponseException.class, () -> {
-                    final Response response1 = performRequestWithRemoteSearchUser(esqlRequest(q));
-                });
+                ResponseException error = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(esqlRequest(q)));
                 assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(401));
+                // TODO: in follow on PR, figure out why this is returning the wrong error - should be "cannot connect to invalid_remote"
                 assertThat(error.getMessage(), containsString("unable to find apikey"));
             }
         }
@@ -699,7 +709,7 @@ public void testCrossClusterEnrich() throws Exception {
         assertWithEnrich(response);
 
         // same as above but authenticate with API key
-        response = performRequestWithRemoteSearchUserViaAPIKey(request);
+        response = performRequestWithRemoteSearchUserViaAPIKey(request, createRemoteSearchUserAPIKey());
         assertWithEnrich(response);
 
         // Query cluster
@@ -963,6 +973,462 @@ public void testAlias() throws Exception {
         removeAliases();
     }
 
+    @SuppressWarnings("unchecked")
+    public void testSearchesAgainstNonMatchingIndicesWithSkipUnavailableTrue() throws Exception {
+        configureRemoteCluster(REMOTE_CLUSTER_ALIAS, fulfillingCluster, false, randomBoolean(), true);
+        populateData();
+        {
+            final var putRoleRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE);
+            putRoleRequest.setJsonEntity("""
+                {
+                  "indices": [{"names": ["employees*"], "privileges": ["read","read_cross_cluster"]}],
+                  "cluster": [ "manage_own_api_key" ],
+                  "remote_indices": [
+                    {
+                      "names": ["employees*"],
+                      "privileges": ["read"],
+                      "clusters": ["my_remote_cluster"]
+                    }
+                  ]
+                }""");
+            Response response = adminClient().performRequest(putRoleRequest);
+            assertOK(response);
+        }
+
+        String remoteSearchUserAPIKey = createRemoteSearchUserAPIKey();
+
+        // sanity check - init queries to ensure we can query employees on local and employees,employees2 on remote
+        {
+            Request request = esqlRequest("""
+                FROM employees,my_remote_cluster:employees,my_remote_cluster:employees2
+                | SORT emp_id ASC
+                | LIMIT 9
+                | KEEP emp_id, department""");
+
+            CheckedConsumer<Response, Exception> verifier = resp -> {
+                assertOK(resp);
+                Map<String, Object> map = responseAsMap(resp);
+                assertThat(((ArrayList) map.get("columns")).size(), greaterThanOrEqualTo(1));
+                assertThat(((ArrayList) map.get("values")).size(), greaterThanOrEqualTo(1));
+                assertExpectedClustersForMissingIndicesTests(
+                    map,
+                    List.of(
+                        new ExpectedCluster("(local)",
"nomatch*", "successful", null), + new ExpectedCluster(REMOTE_CLUSTER_ALIAS, "employees,employees2", "successful", null) + ) + ); + }; + + verifier.accept(performRequestWithRemoteSearchUser(request)); + verifier.accept(performRequestWithRemoteSearchUserViaAPIKey(request, remoteSearchUserAPIKey)); + } + + // missing concrete local index is an error + { + String q = "FROM employees_nomatch,my_remote_cluster:employees"; + + Request limit1 = esqlRequest(q + " | LIMIT 1"); + ResponseException e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(limit1)); + assertThat(e.getMessage(), containsString("Unknown index [employees_nomatch]")); + e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUserViaAPIKey(limit1, remoteSearchUserAPIKey)); + assertThat(e.getMessage(), containsString("Unknown index [employees_nomatch]")); + + Request limit0 = esqlRequest(q + " | LIMIT 0"); + e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(limit0)); + assertThat(e.getMessage(), containsString("Unknown index [employees_nomatch]")); + e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUserViaAPIKey(limit0, remoteSearchUserAPIKey)); + assertThat(e.getMessage(), containsString("Unknown index [employees_nomatch]")); + } + + // missing concrete remote index is not fatal when skip_unavailable=true (as long as an index matches on another cluster) + { + String q = "FROM employees,my_remote_cluster:employees_nomatch"; + + CheckedBiConsumer verifier = new CheckedBiConsumer() { + @Override + public void accept(Response response, Boolean limit0) throws Exception { + assertOK(response); + Map map = responseAsMap(response); + assertThat(((List) map.get("columns")).size(), greaterThanOrEqualTo(1)); + if (limit0) { + assertThat(((List) map.get("values")).size(), equalTo(0)); + } else { + assertThat(((List) map.get("values")).size(), greaterThanOrEqualTo(1)); + } + assertExpectedClustersForMissingIndicesTests( + map, + List.of( + new ExpectedCluster("(local)", "employees", "successful", limit0 ? 0 : null), + new ExpectedCluster(REMOTE_CLUSTER_ALIAS, "employees_nomatch", "skipped", 0) + ) + ); + } + }; + Request limit1 = esqlRequest(q + " | LIMIT 1"); + verifier.accept(performRequestWithRemoteSearchUser(limit1), false); + verifier.accept(performRequestWithRemoteSearchUserViaAPIKey(limit1, remoteSearchUserAPIKey), false); + + Request limit0 = esqlRequest(q + " | LIMIT 0"); + verifier.accept(performRequestWithRemoteSearchUser(limit0), true); + verifier.accept(performRequestWithRemoteSearchUserViaAPIKey(limit0, remoteSearchUserAPIKey), true); + } + + // since there is at least one matching index in the query, the missing wildcarded local index is not an error + { + String q = "FROM employees_nomatch*,my_remote_cluster:employees"; + + CheckedBiConsumer verifier = (response, limit0) -> { + assertOK(response); + Map map = responseAsMap(response); + assertThat(((List) map.get("columns")).size(), greaterThanOrEqualTo(1)); + if (limit0) { + assertThat(((List) map.get("values")).size(), equalTo(0)); + } else { + assertThat(((List) map.get("values")).size(), greaterThanOrEqualTo(1)); + } + assertExpectedClustersForMissingIndicesTests( + map, + List.of( + // local cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched + new ExpectedCluster("(local)", "employees_nomatch*", "successful", 0), + new ExpectedCluster(REMOTE_CLUSTER_ALIAS, "employees", "successful", limit0 ? 
0 : null)
+                    )
+                );
+            };
+
+            Request limit1 = esqlRequest(q + " | LIMIT 1");
+            verifier.accept(performRequestWithRemoteSearchUser(limit1), false);
+            verifier.accept(performRequestWithRemoteSearchUserViaAPIKey(limit1, remoteSearchUserAPIKey), false);
+
+            Request limit0 = esqlRequest(q + " | LIMIT 0");
+            verifier.accept(performRequestWithRemoteSearchUser(limit0), true);
+            verifier.accept(performRequestWithRemoteSearchUserViaAPIKey(limit0, remoteSearchUserAPIKey), true);
+        }
+
+        // since at least one index of the query matches on some cluster, a missing wildcarded index on skip_un=true is not an error
+        {
+            String q = "FROM employees,my_remote_cluster:employees_nomatch*";
+
+            CheckedBiConsumer<Response, Boolean, Exception> verifier = (response, limit0) -> {
+                assertOK(response);
+                Map<String, Object> map = responseAsMap(response);
+                assertThat(((List) map.get("columns")).size(), greaterThanOrEqualTo(1));
+                if (limit0) {
+                    assertThat(((List) map.get("values")).size(), equalTo(0));
+                } else {
+                    assertThat(((List) map.get("values")).size(), greaterThanOrEqualTo(1));
+                }
+                assertExpectedClustersForMissingIndicesTests(
+                    map,
+                    List.of(
+                        new ExpectedCluster("(local)", "employees", "successful", limit0 ? 0 : null),
+                        new ExpectedCluster("my_remote_cluster", "employees_nomatch*", "skipped", 0)
+                    )
+                );
+            };
+
+            Request limit1 = esqlRequest(q + " | LIMIT 1");
+            verifier.accept(performRequestWithRemoteSearchUser(limit1), false);
+            verifier.accept(performRequestWithRemoteSearchUserViaAPIKey(limit1, remoteSearchUserAPIKey), false);
+
+            Request limit0 = esqlRequest(q + " | LIMIT 0");
+            verifier.accept(performRequestWithRemoteSearchUser(limit0), true);
+            verifier.accept(performRequestWithRemoteSearchUserViaAPIKey(limit0, remoteSearchUserAPIKey), true);
+        }
+
+        // an error is thrown if there are no matching indices at all, even when the cluster is skip_unavailable=true
+        {
+            // with non-matching concrete index
+            String q = "FROM my_remote_cluster:employees_nomatch";
+
+            Request limit1 = esqlRequest(q + " | LIMIT 1");
+            ResponseException e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(limit1));
+            assertThat(e.getMessage(), containsString("Unknown index [my_remote_cluster:employees_nomatch]"));
+            e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUserViaAPIKey(limit1, remoteSearchUserAPIKey));
+            assertThat(e.getMessage(), containsString("Unknown index [my_remote_cluster:employees_nomatch]"));
+
+            Request limit0 = esqlRequest(q + " | LIMIT 0");
+            e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(limit0));
+            assertThat(e.getMessage(), containsString("Unknown index [my_remote_cluster:employees_nomatch]"));
+            e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUserViaAPIKey(limit0, remoteSearchUserAPIKey));
+            assertThat(e.getMessage(), containsString("Unknown index [my_remote_cluster:employees_nomatch]"));
+        }
+
+        // an error is thrown if there are no matching indices at all, even when the cluster is skip_unavailable=true and the
+        // index was wildcarded
+        {
+            String localExpr = randomFrom("nomatch", "nomatch*");
+            String remoteExpr = randomFrom("nomatch", "nomatch*");
+            String q = Strings.format("FROM %s,%s:%s", localExpr, REMOTE_CLUSTER_ALIAS, remoteExpr);
+
+            Request limit1 = esqlRequest(q + " | LIMIT 1");
+            ResponseException e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(limit1));
+            assertThat(e.getMessage(), containsString("Unknown index"));
+            assertThat(e.getMessage(),
containsString(Strings.format("%s:%s", REMOTE_CLUSTER_ALIAS, remoteExpr))); + e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUserViaAPIKey(limit1, remoteSearchUserAPIKey)); + assertThat(e.getMessage(), containsString("Unknown index")); + assertThat(e.getMessage(), containsString(Strings.format("%s:%s", REMOTE_CLUSTER_ALIAS, remoteExpr))); + + Request limit0 = esqlRequest(q + " | LIMIT 0"); + e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(limit0)); + assertThat(e.getMessage(), containsString("Unknown index")); + assertThat(e.getMessage(), containsString(Strings.format("%s:%s", REMOTE_CLUSTER_ALIAS, remoteExpr))); + e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUserViaAPIKey(limit0, remoteSearchUserAPIKey)); + assertThat(e.getMessage(), containsString("Unknown index")); + assertThat(e.getMessage(), containsString(Strings.format("%s:%s", REMOTE_CLUSTER_ALIAS, remoteExpr))); + } + + // missing concrete index on skip_unavailable=true cluster is not an error + { + String q = "FROM employees,my_remote_cluster:employees_nomatch,my_remote_cluster:employees*"; + + CheckedBiConsumer verifier = (response, limit0) -> { + assertOK(response); + Map map = responseAsMap(response); + assertThat(((List) map.get("columns")).size(), greaterThanOrEqualTo(1)); + if (limit0) { + assertThat(((List) map.get("values")).size(), equalTo(0)); + } else { + assertThat(((List) map.get("values")).size(), greaterThanOrEqualTo(1)); + } + final List expectedClusters = List.of( + new ExpectedCluster("(local)", "employees", "successful", limit0 ? 0 : null), + new ExpectedCluster(REMOTE_CLUSTER_ALIAS, "employees_nomatch,employees*", "successful", 0) + ); + assertExpectedClustersForMissingIndicesTests(map, expectedClusters); + }; + + // TODO: uncomment in follow on PR handling skip_unavailable errors at execution time + // Request limit1 = esqlRequest(q + " | LIMIT 1"); + // verifier.accept(performRequestWithRemoteSearchUser(limit1), false); + // verifier.accept(performRequestWithRemoteSearchUserViaAPIKey(limit1, remoteSearchUserAPIKey), false); + + Request limit0 = esqlRequest(q + " | LIMIT 0"); + verifier.accept(performRequestWithRemoteSearchUser(limit0), true); + verifier.accept(performRequestWithRemoteSearchUserViaAPIKey(limit0, remoteSearchUserAPIKey), true); + } + } + + @SuppressWarnings("unchecked") + public void testSearchesAgainstNonMatchingIndicesWithSkipUnavailableFalse() throws Exception { + configureRemoteCluster(REMOTE_CLUSTER_ALIAS, fulfillingCluster, false, randomBoolean(), false); + populateData(); + + { + final var putRoleRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); + putRoleRequest.setJsonEntity(""" + { + "indices": [{"names": ["employees*"], "privileges": ["read","read_cross_cluster"]}], + "cluster": [ "manage_own_api_key" ], + "remote_indices": [ + { + "names": ["employees*"], + "privileges": ["read"], + "clusters": ["my_remote_cluster"] + } + ] + }"""); + Response response = adminClient().performRequest(putRoleRequest); + assertOK(response); + } + + String remoteSearchUserAPIKey = createRemoteSearchUserAPIKey(); + + // sanity check - init queries to ensure we can query employees on local and employees,employees2 on remote + { + Request request = esqlRequest(""" + FROM employees,my_remote_cluster:employees,my_remote_cluster:employees2 + | SORT emp_id ASC + | LIMIT 5 + | KEEP emp_id, department"""); + + CheckedConsumer verifier = resp -> { + assertOK(resp); + Map map = 
responseAsMap(resp);
+                assertThat(((List) map.get("columns")).size(), greaterThanOrEqualTo(1));
+                assertThat(((List) map.get("values")).size(), greaterThanOrEqualTo(1));
+                assertExpectedClustersForMissingIndicesTests(
+                    map,
+                    List.of(
+                        new ExpectedCluster("(local)", "employees", "successful", null),
+                        new ExpectedCluster(REMOTE_CLUSTER_ALIAS, "employees,employees2", "successful", null)
+                    )
+                );
+            };
+
+            verifier.accept(performRequestWithRemoteSearchUser(request));
+            verifier.accept(performRequestWithRemoteSearchUserViaAPIKey(request, remoteSearchUserAPIKey));
+        }
+
+        // missing concrete local index is an error
+        {
+            String q = "FROM employees_nomatch,my_remote_cluster:employees";
+
+            Request limit1 = esqlRequest(q + " | LIMIT 1");
+            ResponseException e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(limit1));
+            assertThat(e.getMessage(), containsString("Unknown index [employees_nomatch]"));
+            e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUserViaAPIKey(limit1, remoteSearchUserAPIKey));
+            assertThat(e.getMessage(), containsString("Unknown index [employees_nomatch]"));
+
+            Request limit0 = esqlRequest(q + " | LIMIT 0");
+            e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(limit0));
+            assertThat(e.getMessage(), containsString("Unknown index [employees_nomatch]"));
+            e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUserViaAPIKey(limit0, remoteSearchUserAPIKey));
+            assertThat(e.getMessage(), containsString("Unknown index [employees_nomatch]"));
+        }
+
+        // missing concrete remote index is fatal error when skip_unavailable=false
+        {
+            String q = "FROM employees,my_remote_cluster:employees_nomatch";
+
+            Request limit1 = esqlRequest(q + " | LIMIT 1");
+            ResponseException e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(limit1));
+            assertThat(e.getMessage(), containsString("Unknown index [my_remote_cluster:employees_nomatch]"));
+            e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUserViaAPIKey(limit1, remoteSearchUserAPIKey));
+            assertThat(e.getMessage(), containsString("Unknown index [my_remote_cluster:employees_nomatch]"));
+
+            Request limit0 = esqlRequest(q + " | LIMIT 0");
+            e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(limit0));
+            assertThat(e.getMessage(), containsString("Unknown index [my_remote_cluster:employees_nomatch]"));
+            e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUserViaAPIKey(limit0, remoteSearchUserAPIKey));
+            assertThat(e.getMessage(), containsString("Unknown index [my_remote_cluster:employees_nomatch]"));
+        }
+
+        // since there is at least one matching index in the query, the missing wildcarded local index is not an error
+        {
+            String q = "FROM employees_nomatch*,my_remote_cluster:employees";
+
+            CheckedBiConsumer<Response, Boolean, Exception> verifier = (response, limit0) -> {
+                assertOK(response);
+                Map<String, Object> map = responseAsMap(response);
+                assertThat(((List) map.get("columns")).size(), greaterThanOrEqualTo(1));
+                if (limit0) {
+                    assertThat(((List) map.get("values")).size(), equalTo(0));
+                } else {
+                    assertThat(((List) map.get("values")).size(), greaterThanOrEqualTo(1));
+                }
+                assertExpectedClustersForMissingIndicesTests(
+                    map,
+                    List.of(
+                        // local
cluster is never marked as SKIPPED even when no matching indices - just marked as 0 shards searched
+                        new ExpectedCluster("(local)", "employees_nomatch*", "successful", 0),
+                        new ExpectedCluster(REMOTE_CLUSTER_ALIAS, "employees", "successful", limit0 ? 0 : null)
+                    )
+                );
+            };
+
+            Request limit1 = esqlRequest(q + " | LIMIT 1");
+            verifier.accept(performRequestWithRemoteSearchUser(limit1), false);
+            verifier.accept(performRequestWithRemoteSearchUserViaAPIKey(limit1, remoteSearchUserAPIKey), false);
+
+            Request limit0 = esqlRequest(q + " | LIMIT 0");
+            verifier.accept(performRequestWithRemoteSearchUser(limit0), true);
+            verifier.accept(performRequestWithRemoteSearchUserViaAPIKey(limit0, remoteSearchUserAPIKey), true);
+        }
+
+        // query is fatal since the remote cluster has skip_unavailable=false and has no matching indices
+        {
+            String q = "FROM employees,my_remote_cluster:employees_nomatch*";
+
+            Request limit1 = esqlRequest(q + " | LIMIT 1");
+            ResponseException e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(limit1));
+            assertThat(e.getMessage(), containsString("Unknown index [my_remote_cluster:employees_nomatch*]"));
+            e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUserViaAPIKey(limit1, remoteSearchUserAPIKey));
+            assertThat(e.getMessage(), containsString("Unknown index [my_remote_cluster:employees_nomatch*]"));
+
+            Request limit0 = esqlRequest(q + " | LIMIT 0");
+            e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(limit0));
+            assertThat(e.getMessage(), containsString("Unknown index [my_remote_cluster:employees_nomatch*]"));
+            e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUserViaAPIKey(limit0, remoteSearchUserAPIKey));
+            assertThat(e.getMessage(), containsString("Unknown index [my_remote_cluster:employees_nomatch*]"));
+        }
+
+        // an error is thrown if there are no matching indices at all
+        {
+            // with non-matching concrete index
+            String q = "FROM my_remote_cluster:employees_nomatch";
+
+            Request limit1 = esqlRequest(q + " | LIMIT 1");
+            ResponseException e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(limit1));
+            assertThat(e.getMessage(), containsString("Unknown index [my_remote_cluster:employees_nomatch]"));
+            e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUserViaAPIKey(limit1, remoteSearchUserAPIKey));
+            assertThat(e.getMessage(), containsString("Unknown index [my_remote_cluster:employees_nomatch]"));
+
+            Request limit0 = esqlRequest(q + " | LIMIT 0");
+            e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(limit0));
+            assertThat(e.getMessage(), containsString("Unknown index [my_remote_cluster:employees_nomatch]"));
+            e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUserViaAPIKey(limit0, remoteSearchUserAPIKey));
+            assertThat(e.getMessage(), containsString("Unknown index [my_remote_cluster:employees_nomatch]"));
+        }
+
+        // an error is also thrown if there are no matching indices at all, even when the expressions are wildcarded
+        {
+            String localExpr = randomFrom("nomatch", "nomatch*");
+            String remoteExpr = randomFrom("nomatch", "nomatch*");
+            String q = Strings.format("FROM %s,%s:%s", localExpr, REMOTE_CLUSTER_ALIAS, remoteExpr);
+
+            Request limit1 = esqlRequest(q + " | LIMIT 1");
+            ResponseException e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(limit1));
+            assertThat(e.getMessage(), containsString("Unknown index"));
+            assertThat(e.getMessage(),
containsString(Strings.format("%s:%s", REMOTE_CLUSTER_ALIAS, remoteExpr))); + e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUserViaAPIKey(limit1, remoteSearchUserAPIKey)); + assertThat(e.getMessage(), containsString("Unknown index")); + assertThat(e.getMessage(), containsString(Strings.format("%s:%s", REMOTE_CLUSTER_ALIAS, remoteExpr))); + + Request limit0 = esqlRequest(q + " | LIMIT 0"); + e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(limit0)); + assertThat(e.getMessage(), containsString("Unknown index")); + assertThat(e.getMessage(), containsString(Strings.format("%s:%s", REMOTE_CLUSTER_ALIAS, remoteExpr))); + e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUserViaAPIKey(limit0, remoteSearchUserAPIKey)); + assertThat(e.getMessage(), containsString("Unknown index")); + assertThat(e.getMessage(), containsString(Strings.format("%s:%s", REMOTE_CLUSTER_ALIAS, remoteExpr))); + } + + // error since the remote cluster with skip_unavailable=false specified a concrete index that is not found + { + String q = "FROM employees,my_remote_cluster:employees_nomatch,my_remote_cluster:employees*"; + + Request limit1 = esqlRequest(q + " | LIMIT 1"); + ResponseException e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(limit1)); + /* Example error: + *{"error":{"root_cause":[{"type":"security_exception","reason":"action [indices:data/read/esql/cluster] towards + * remote cluster is unauthorized for user [remote_search_user] with assigned roles [remote_search] authenticated by + * API key id [zaeMK5MBeGk5jCIiFtqB] of user [test_user] on indices [employees_nomatch], this action is granted by + * the index privileges [read,all]"}],"type":"security_exception","reason":"action [indices:data/read/esql/cluster] + * towards remote cluster is unauthorized for user [remote_search_user] with assigned roles [remote_search] authenticated + * by API key id [zaeMK5MBeGk5jCIiFtqB] of user [test_user] on indices [employees_nomatch], this action is granted by the + * index privileges [read,all]"},"status":403}" + */ + assertThat(e.getMessage(), containsString("unauthorized for user [remote_search_user]")); + assertThat(e.getMessage(), containsString("on indices [employees_nomatch]")); + assertThat(e.getMessage(), containsString("security_exception")); + + e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUserViaAPIKey(limit1, remoteSearchUserAPIKey)); + /* Example error: + * {"error":{"root_cause":[{"type":"security_exception","reason":"action [indices:data/read/esql/cluster] towards + * remote cluster is unauthorized for API key id [sxuSK5MBSfGSGj4YFLyv] of user [remote_search_user] authenticated by + * API key id [cUiRK5MB5j18U5stsvQj] of user [test_user] on indices [employees_nomatch], this action is granted by + * the index privileges [read,all]"}],"type":"security_exception","reason":"action [indices:data/read/esql/cluster] + * towards remote cluster is unauthorized for API key id [sxuSK5MBSfGSGj4YFLyv] of user [remote_search_user] authenticated + * by API key id [cUiRK5MB5j18U5stsvQj] of user [test_user] on indices [employees_nomatch], this action is granted by the + * index privileges [read,all]"},"status":403}" + */ + assertThat(e.getMessage(), containsString("unauthorized for API key id")); + assertThat(e.getMessage(), containsString("of user [remote_search_user]")); + assertThat(e.getMessage(), containsString("on indices [employees_nomatch]")); + 
assertThat(e.getMessage(), containsString("security_exception")); + + // TODO: in follow on PR, add support for throwing a VerificationException for this scenario - no exception is currently thrown + // Request limit0 = esqlRequest(q + " | LIMIT 0"); + // e = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(limit0)); + // assertThat(e.getMessage(), containsString("Unknown index [my_remote_cluster:employees_nomatch]")); + } + } + protected Request esqlRequest(String command) throws IOException { XContentBuilder body = JsonXContent.contentBuilder(); body.startObject(); @@ -1002,7 +1468,12 @@ private Response performRequestWithRemoteSearchUser(final Request request) throw return client().performRequest(request); } - private Response performRequestWithRemoteSearchUserViaAPIKey(final Request request) throws IOException { + private Response performRequestWithRemoteSearchUserViaAPIKey(Request request, String encodedApiKey) throws IOException { + request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "ApiKey " + encodedApiKey)); + return client().performRequest(request); + } + + private String createRemoteSearchUserAPIKey() throws IOException { final Request createApiKeyRequest = new Request("POST", "_security/api_key"); createApiKeyRequest.setJsonEntity(""" { @@ -1016,8 +1487,7 @@ private Response performRequestWithRemoteSearchUserViaAPIKey(final Request reque assertOK(response); final Map responseAsMap = responseAsMap(response); final String encoded = (String) responseAsMap.get("encoded"); - request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "ApiKey " + encoded)); - return client().performRequest(request); + return encoded; } @SuppressWarnings("unchecked") @@ -1049,7 +1519,7 @@ private void assertRemoteOnlyAgainst2IndexResults(Response response) throws IOEx } @SuppressWarnings("unchecked") - private void assertLocalOnlyResults(Response response) throws IOException { + private void assertLocalOnlyResultsAndSkippedRemote(Response response) throws IOException { assertOK(response); Map responseAsMap = entityAsMap(response); List columns = (List) responseAsMap.get("columns"); @@ -1061,6 +1531,34 @@ private void assertLocalOnlyResults(Response response) throws IOException { .collect(Collectors.toList()); // local results assertThat(flatList, containsInAnyOrder("2", "4", "6", "8", "support", "management", "engineering", "marketing")); + Map clusters = (Map) responseAsMap.get("_clusters"); + + /* + clusters map: + {running=0, total=2, details={ + invalid_remote={_shards={total=0, failed=0, successful=0, skipped=0}, took=176, indices=employees, + failures=[{reason={reason=Unable to connect to [invalid_remote], type=connect_transport_exception}, + index=null, shard=-1}], status=skipped}, + (local)={_shards={total=1, failed=0, successful=1, skipped=0}, took=298, indices=employees, status=successful}}, + failed=0, partial=0, successful=1, skipped=1} + */ + + assertThat((int) clusters.get("total"), equalTo(2)); + assertThat((int) clusters.get("successful"), equalTo(1)); + assertThat((int) clusters.get("skipped"), equalTo(1)); + + Map details = (Map) clusters.get("details"); + Map invalidRemoteMap = (Map) details.get("invalid_remote"); + assertThat(invalidRemoteMap.get("status").toString(), equalTo("skipped")); + List failures = (List) invalidRemoteMap.get("failures"); + assertThat(failures.size(), equalTo(1)); + Map failureMap = (Map) failures.get(0); + Map reasonMap = (Map) failureMap.get("reason"); + 
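The refactor in this hunk separates creating the API key (`createRemoteSearchUserAPIKey`) from attaching it, so one encoded key can be reused across many requests. A compile-level sketch of the attaching half against the low-level REST client types named in the diff (the `withApiKey` helper and class name are hypothetical; no request is actually sent, and the Elasticsearch REST client is assumed on the classpath):

```java
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions;

public class ApiKeyReuseSketch {

    // Attach an already-encoded API key as the Authorization header, as the test does.
    static Request withApiKey(Request request, String encodedApiKey) {
        request.setOptions(
            RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "ApiKey " + encodedApiKey)
        );
        return request;
    }

    public static void main(String[] args) {
        // Build (but do not send) an ES|QL request carrying a placeholder key.
        Request r = withApiKey(new Request("POST", "_query"), "<encoded-api-key>");
        System.out.println(r);
    }
}
```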
assertThat(reasonMap.get("reason").toString(), containsString("Unable to connect to [invalid_remote]")); + assertThat(reasonMap.get("type").toString(), containsString("connect_transport_exception")); + + Map localCluster = (Map) details.get("(local)"); + assertThat(localCluster.get("status").toString(), equalTo("successful")); } @SuppressWarnings("unchecked") @@ -1112,4 +1610,54 @@ private void assertWithEnrich(Response response) throws IOException { assertThat(flatList, containsInAnyOrder(2, 3, "usa", "canada")); } + record ExpectedCluster(String clusterAlias, String indexExpression, String status, Integer totalShards) {} + + @SuppressWarnings("unchecked") + void assertExpectedClustersForMissingIndicesTests(Map responseMap, List expected) { + Map clusters = (Map) responseMap.get("_clusters"); + assertThat((int) responseMap.get("took"), greaterThan(0)); + + Map detailsMap = (Map) clusters.get("details"); + assertThat(detailsMap.size(), is(expected.size())); + + assertThat((int) clusters.get("total"), is(expected.size())); + assertThat((int) clusters.get("successful"), is((int) expected.stream().filter(ec -> ec.status().equals("successful")).count())); + assertThat((int) clusters.get("skipped"), is((int) expected.stream().filter(ec -> ec.status().equals("skipped")).count())); + assertThat((int) clusters.get("failed"), is((int) expected.stream().filter(ec -> ec.status().equals("failed")).count())); + + for (ExpectedCluster expectedCluster : expected) { + Map clusterDetails = (Map) detailsMap.get(expectedCluster.clusterAlias()); + String msg = expectedCluster.clusterAlias(); + + assertThat(msg, (int) clusterDetails.get("took"), greaterThan(0)); + assertThat(msg, clusterDetails.get("status"), is(expectedCluster.status())); + Map shards = (Map) clusterDetails.get("_shards"); + if (expectedCluster.totalShards() == null) { + assertThat(msg, (int) shards.get("total"), greaterThan(0)); + } else { + assertThat(msg, (int) shards.get("total"), is(expectedCluster.totalShards())); + } + + if (expectedCluster.status().equals("successful")) { + assertThat((int) shards.get("successful"), is((int) shards.get("total"))); + assertThat((int) shards.get("skipped"), is(0)); + + } else if (expectedCluster.status().equals("skipped")) { + assertThat((int) shards.get("successful"), is(0)); + assertThat((int) shards.get("skipped"), is((int) shards.get("total"))); + ArrayList failures = (ArrayList) clusterDetails.get("failures"); + assertThat(failures.size(), is(1)); + Map failure1 = (Map) failures.get(0); + Map innerReason = (Map) failure1.get("reason"); + String expectedMsg = "Unknown index [" + expectedCluster.indexExpression() + "]"; + assertThat(innerReason.get("reason").toString(), containsString(expectedMsg)); + assertThat(innerReason.get("type").toString(), containsString("verification_exception")); + + } else { + fail(msg + "; Unexpected status: " + expectedCluster.status()); + } + // currently failed shards is always zero - change this once we start allowing partial data for individual shard failures + assertThat((int) shards.get("failed"), is(0)); + } + } } diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityReloadCredentialsRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityReloadCredentialsRestIT.java index 42982e6183613..fb941e9e815cf 100644 --- 
a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityReloadCredentialsRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityReloadCredentialsRestIT.java @@ -42,7 +42,7 @@ import static org.hamcrest.Matchers.nullValue; // account for slow stored secure settings updates (involves removing and re-creating the keystore) -@TimeoutSuite(millis = 10 * TimeUnits.MINUTE) +@TimeoutSuite(millis = 20 * TimeUnits.MINUTE) public class RemoteClusterSecurityReloadCredentialsRestIT extends AbstractRemoteClusterSecurityTestCase { private static final MutableSettingsProvider keystoreSettings = new MutableSettingsProvider(); diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index df97c489cc6b7..8df10037affdb 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.security.operator; +import org.elasticsearch.cluster.metadata.DataStream; + import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; @@ -358,6 +360,7 @@ public class Constants { "cluster:monitor/nodes/data_tier_usage", "cluster:monitor/nodes/features", "cluster:monitor/nodes/hot_threads", + "cluster:monitor/nodes/index_mode_stats", "cluster:monitor/nodes/info", "cluster:monitor/nodes/stats", "cluster:monitor/nodes/usage", @@ -399,6 +402,7 @@ public class Constants { "cluster:monitor/xpack/info/frozen_indices", "cluster:monitor/xpack/info/graph", "cluster:monitor/xpack/info/ilm", + "cluster:monitor/xpack/info/logsdb", "cluster:monitor/xpack/info/logstash", "cluster:monitor/xpack/info/ml", "cluster:monitor/xpack/info/monitoring", @@ -463,6 +467,7 @@ public class Constants { "cluster:monitor/xpack/usage/health_api", "cluster:monitor/xpack/usage/ilm", "cluster:monitor/xpack/usage/inference", + "cluster:monitor/xpack/usage/logsdb", "cluster:monitor/xpack/usage/logstash", "cluster:monitor/xpack/usage/ml", "cluster:monitor/xpack/usage/monitoring", @@ -488,6 +493,7 @@ public class Constants { "indices:admin/block/add[s]", "indices:admin/cache/clear", "indices:admin/data_stream/lazy_rollover", + "indices:admin/data_stream/reindex", "indices:internal/admin/ccr/restore/file_chunk/get", "indices:internal/admin/ccr/restore/session/clear", "indices:internal/admin/ccr/restore/session/put", @@ -504,9 +510,9 @@ public class Constants { "indices:admin/data_stream/lifecycle/get", "indices:admin/data_stream/lifecycle/put", "indices:admin/data_stream/lifecycle/explain", - "indices:admin/data_stream/options/delete", - "indices:admin/data_stream/options/get", - "indices:admin/data_stream/options/put", + DataStream.isFailureStoreFeatureFlagEnabled() ? "indices:admin/data_stream/options/delete" : null, + DataStream.isFailureStoreFeatureFlagEnabled() ? "indices:admin/data_stream/options/get" : null, + DataStream.isFailureStoreFeatureFlagEnabled() ? 
"indices:admin/data_stream/options/put" : null, "indices:admin/delete", "indices:admin/flush", "indices:admin/flush[s]", diff --git a/x-pack/plugin/security/qa/profile/build.gradle b/x-pack/plugin/security/qa/profile/build.gradle index ac821e670fde0..b0a1927ab9dfe 100644 --- a/x-pack/plugin/security/qa/profile/build.gradle +++ b/x-pack/plugin/security/qa/profile/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ apply plugin: 'elasticsearch.internal-java-rest-test' @@ -7,7 +12,7 @@ dependencies { javaRestTestImplementation project(':x-pack:plugin:security') } -boolean literalUsername = BuildParams.random.nextBoolean() +boolean literalUsername = buildParams.random.nextBoolean() tasks.named("javaRestTest").configure { usesDefaultDistribution() diff --git a/x-pack/plugin/security/qa/security-basic/build.gradle b/x-pack/plugin/security/qa/security-basic/build.gradle index 7684d879671ab..8740354646346 100644 --- a/x-pack/plugin/security/qa/security-basic/build.gradle +++ b/x-pack/plugin/security/qa/security-basic/build.gradle @@ -1,8 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ apply plugin: 'elasticsearch.internal-java-rest-test' -import org.elasticsearch.gradle.internal.info.BuildParams - dependencies { javaRestTestImplementation(testArtifact(project(xpackModule('security')))) javaRestTestImplementation(testArtifact(project(xpackModule('core')))) @@ -13,7 +17,7 @@ tasks.named('javaRestTest') { } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("javaRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/security/qa/security-disabled/build.gradle b/x-pack/plugin/security/qa/security-disabled/build.gradle index eba70753c9f28..6fa100f392b9a 100644 --- a/x-pack/plugin/security/qa/security-disabled/build.gradle +++ b/x-pack/plugin/security/qa/security-disabled/build.gradle @@ -1,3 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + /* * This QA project tests the security plugin when security is explicitly disabled. 
* It is intended to cover security functionality which is supposed to @@ -5,7 +12,6 @@ * For example: If a cluster has a pipeline with the set_security_user processor * defined, it should be not fail */ -import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.internal-java-rest-test' @@ -17,5 +23,5 @@ dependencies { tasks.named("javaRestTest").configure { usesDefaultDistribution() // Test clusters run with security disabled - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) } diff --git a/x-pack/plugin/security/qa/tls-basic/build.gradle b/x-pack/plugin/security/qa/tls-basic/build.gradle index fbe91009011e3..c0df6a4f27f58 100644 --- a/x-pack/plugin/security/qa/tls-basic/build.gradle +++ b/x-pack/plugin/security/qa/tls-basic/build.gradle @@ -1,13 +1,18 @@ -apply plugin: 'elasticsearch.legacy-java-rest-test' +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ -import org.elasticsearch.gradle.internal.info.BuildParams +apply plugin: 'elasticsearch.legacy-java-rest-test' dependencies { javaRestTestImplementation(testArtifact(project(xpackModule('security')))) javaRestTestImplementation(testArtifact(project(xpackModule('core')))) } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("javaRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java index a5f827c2a4b53..82a10f21debfb 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java @@ -8,13 +8,11 @@ package org.elasticsearch.integration; import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; @@ -24,7 +22,6 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.SecuritySingleNodeTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyRequest; @@ -43,6 +40,7 @@ import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; +import static org.elasticsearch.action.admin.cluster.storedscripts.StoredScriptIntegTestUtils.newPutStoredScriptTestRequest; import static 
org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.WAIT_UNTIL; @@ -350,17 +348,8 @@ public void testRequestCacheWithTemplateRoleQuery() { private void prepareIndices() { final Client client = client(); - assertAcked( - safeExecute( - TransportPutStoredScriptAction.TYPE, - new PutStoredScriptRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).id("my-script") - .content( - new BytesArray(""" - {"script":{"source":"{\\"match\\":{\\"username\\":\\"{{_user.username}}\\"}}","lang":"mustache"}}"""), - XContentType.JSON - ) - ) - ); + assertAcked(safeExecute(TransportPutStoredScriptAction.TYPE, newPutStoredScriptTestRequest("my-script", """ + {"script":{"source":"{\\"match\\":{\\"username\\":\\"{{_user.username}}\\"}}","lang":"mustache"}}"""))); assertAcked(indicesAdmin().prepareCreate(DLS_INDEX).addAlias(new Alias("dls-alias")).get()); client.prepareIndex(DLS_INDEX).setId("101").setSource("number", 101, "letter", "A").get(); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/ssl/SslClientAuthenticationTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/ssl/SslClientAuthenticationTests.java index bc01b0693af0a..2851af1461012 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/ssl/SslClientAuthenticationTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/ssl/SslClientAuthenticationTests.java @@ -107,7 +107,7 @@ public void testThatHttpFailsWithoutSslClientAuth() throws IOException { if (inFipsJvm()) { Throwable t = ExceptionsHelper.unwrap(e, CertificateException.class); assertThat(t, instanceOf(CertificateException.class)); - assertThat(t.getMessage(), containsString("Unable to find certificate chain")); + assertThat(t.getMessage(), containsString("Unable to construct a valid chain")); } else { Throwable t = ExceptionsHelper.unwrap(e, CertPathBuilderException.class); assertThat(t, instanceOf(CertPathBuilderException.class)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 0b387a738a2c5..ef66392a87260 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -1048,8 +1048,6 @@ Collection createComponents( getClock(), client, systemIndices.getProfileIndexManager(), - clusterService, - featureService, realms ); components.add(profileService); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java index d0292f32cd75f..53ecafa280715 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java @@ -7,18 +7,14 @@ package org.elasticsearch.xpack.security; -import org.elasticsearch.Version; import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; -import java.util.Map; import java.util.Set; import static 
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditUtil.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditUtil.java
index 13e3e40887d89..58516b1d8324d 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditUtil.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditUtil.java
@@ -27,10 +27,11 @@ public class AuditUtil {
     public static String restRequestContent(RestRequest request) {
         if (request.hasContent()) {
+            var content = request.content();
             try {
-                return XContentHelper.convertToJson(request.content(), false, false, request.getXContentType());
+                return XContentHelper.convertToJson(content, false, false, request.getXContentType());
             } catch (IOException ioe) {
-                return "Invalid Format: " + request.content().utf8ToString();
+                return "Invalid Format: " + content.utf8ToString();
             }
         }
         return "";
     }
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java
index b347c278aae08..a3ee313c7f1d9 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java
@@ -35,7 +35,6 @@
 import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.client.internal.OriginSettingClient;
-import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.BackoffPolicy;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -45,7 +44,6 @@
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.core.Tuple;
-import org.elasticsearch.features.FeatureService;
 import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.index.query.BoolQueryBuilder;
 import org.elasticsearch.index.query.MultiMatchQueryBuilder;
@@ -100,14 +98,12 @@
 import static org.elasticsearch.action.bulk.TransportSingleItemBulkWriteAction.toSingleItemBulkRequest;
 import static org.elasticsearch.common.Strings.collectionToCommaDelimitedString;
 import static org.elasticsearch.core.Strings.format;
-import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN;
 import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_PROFILE_ORIGIN;
 import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
 import static org.elasticsearch.xpack.core.security.authc.Authentication.isFileOrNativeRealm;
 import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.PRIMARY_SHARDS;
 import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.SEARCH_SHARDS;
 import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_PROFILE_ALIAS;
-import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_PROFILE_ORIGIN_FEATURE;

 public class ProfileService {
     private static final Logger logger = LogManager.getLogger(ProfileService.class);
@@ -120,26 +116,14 @@ public class ProfileService {
     private final Clock clock;
     private final Client client;
     private final SecurityIndexManager profileIndex;
-    private final ClusterService clusterService;
-    private final FeatureService featureService;
     private final Function domainConfigLookup;
     private final Function realmRefLookup;

-    public ProfileService(
-        Settings settings,
-        Clock clock,
-        Client client,
-        SecurityIndexManager profileIndex,
-        ClusterService clusterService,
-        FeatureService featureService,
-        Realms realms
-    ) {
+    public ProfileService(Settings settings, Clock clock, Client client, SecurityIndexManager profileIndex, Realms realms) {
         this.settings = settings;
         this.clock = clock;
         this.client = client;
         this.profileIndex = profileIndex;
-        this.clusterService = clusterService;
-        this.featureService = featureService;
         this.domainConfigLookup = realms::getDomainConfig;
         this.realmRefLookup = realms::getRealmRef;
     }
@@ -273,7 +257,7 @@ public void suggestProfile(SuggestProfilesRequest request, TaskId parentTaskId,
             listener::onFailure,
             () -> executeAsyncWithOrigin(
                 client,
-                getActionOrigin(),
+                SECURITY_PROFILE_ORIGIN,
                 TransportSearchAction.TYPE,
                 searchRequest,
                 ActionListener.wrap(searchResponse -> {
@@ -403,7 +387,7 @@ public void usageStats(ActionListener> listener) {
             listener::onFailure,
             () -> executeAsyncWithOrigin(
                 client,
-                getActionOrigin(),
+                SECURITY_PROFILE_ORIGIN,
                 TransportMultiSearchAction.TYPE,
                 multiSearchRequest,
                 ActionListener.wrap(multiSearchResponse -> {
@@ -484,7 +468,7 @@ private void getVersionedDocument(String uid, ActionListener
             listener::onFailure,
             () -> executeAsyncWithOrigin(
                 client,
-                getActionOrigin(),
+                SECURITY_PROFILE_ORIGIN,
                 TransportGetAction.TYPE,
                 getRequest,
                 ActionListener.wrap(response -> {
@@ -514,7 +498,7 @@ private void getVersionedDocuments(Collection uids, ActionListener {
             frozenProfileIndex.checkIndexVersionThenExecute(
                 listener::onFailure,
-                () -> new OriginSettingClient(client, getActionOrigin()).prepareMultiGet()
+                () -> new OriginSettingClient(client, SECURITY_PROFILE_ORIGIN).prepareMultiGet()
                     .addIds(frozenProfileIndex.aliasName(), uids.stream().map(ProfileService::uidToDocId).toArray(String[]::new))
                     .execute(ActionListener.wrap(multiGetResponse -> {
                         List retrievedDocs = new ArrayList<>(multiGetResponse.getResponses().length);
@@ -589,7 +573,7 @@ private void searchVersionedDocumentsForSubjects(
         subjects.forEach(subject -> multiSearchRequest.add(buildSearchRequestForSubject(subject)));
         executeAsyncWithOrigin(
             client,
-            getActionOrigin(),
+            SECURITY_PROFILE_ORIGIN,
             TransportMultiSearchAction.TYPE,
             multiSearchRequest,
             ActionListener.wrap(
@@ -742,7 +726,7 @@ void createNewProfile(Subject subject, String uid, ActionListener liste
             listener::onFailure,
             () -> executeAsyncWithOrigin(
                 client,
-                getActionOrigin(),
+                SECURITY_PROFILE_ORIGIN,
                 TransportBulkAction.TYPE,
                 bulkRequest,
                 TransportBulkAction.unwrappingSingleItemBulkResponse(ActionListener.wrap(indexResponse -> {
@@ -1007,7 +991,7 @@ void doUpdate(UpdateRequest updateRequest, ActionListener listen
             listener::onFailure,
             () -> executeAsyncWithOrigin(
                 client,
-                getActionOrigin(),
+                SECURITY_PROFILE_ORIGIN,
                 TransportUpdateAction.TYPE,
                 updateRequest,
                 ActionListener.wrap(updateResponse -> {
@@ -1019,15 +1003,6 @@ void doUpdate(UpdateRequest updateRequest, ActionListener listen
         );
     }

-    private String getActionOrigin() {
-        // profile origin and user is not available before v8.3.0
-        if (featureService.clusterHasFeature(clusterService.state(), SECURITY_PROFILE_ORIGIN_FEATURE)) {
-            return SECURITY_PROFILE_ORIGIN;
-        } else {
-            return SECURITY_ORIGIN;
-        }
-    }
-
     private static String uidToDocId(String uid) {
         return DOC_ID_PREFIX + uid;
     }
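End state of the ProfileService wiring: no ClusterService or FeatureService, and every internal client call is stamped with SECURITY_PROFILE_ORIGIN unconditionally. A sketch of the new construction, following the Security.java hunk earlier in this patch (the settings argument sits above the visible hunk context, so its exact expression is an assumption):

    ProfileService profileService = new ProfileService(
        settings,                                 // assumed; outside the hunk's visible context
        getClock(),
        client,
        systemIndices.getProfileIndexManager(),
        realms
    );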
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java
index f0405e42f1f22..d5d11ea42e345 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java
@@ -75,7 +75,7 @@ protected final RestChannelConsumer prepareRequest(RestRequest request, NodeClie
             return innerPrepareRequest(request, client);
         } else {
             request.params().keySet().forEach(key -> request.param(key, ""));
-            request.content();
+            request.content(); // mark content consumed
             return channel -> channel.sendResponse(new RestResponse(channel, failedFeature));
         }
     }
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java
index f2233a7e19fd0..8029ed3ba45e4 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java
@@ -8,7 +8,7 @@

 import org.elasticsearch.ElasticsearchSecurityException;
 import org.elasticsearch.client.internal.node.NodeClient;
-import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.ReleasableBytesReference;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.license.XPackLicenseState;
@@ -77,7 +77,7 @@ public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient c
          * Consume the body immediately. This ensures that if there is a body and we later reject the request (e.g., because security is not
          * enabled) that the REST infrastructure will not reject the request for not having consumed the body.
          */
-        final Tuple content = request.contentOrSourceParam();
+        final Tuple content = request.contentOrSourceParam();
         final String username = getUsername(request);
         if (username == null) {
             return restChannel -> { throw new ElasticsearchSecurityException("there is no authenticated user"); };
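Both REST handler changes serve the same contract: a request body must be marked consumed even when the request is rejected early, otherwise the REST infrastructure flags the unconsumed body as an error. A minimal sketch of the consume-then-reject shape (simplified; not the exact framework code):

    // Touch params and content so the infrastructure sees them as consumed,
    // then answer with the failure instead of dispatching to the real handler.
    RestChannelConsumer rejectEarly(RestRequest request, RestResponse failure) {
        request.params().keySet().forEach(key -> request.param(key, ""));
        request.content(); // mark content consumed
        return channel -> channel.sendResponse(failure);
    }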
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java
index 609e6696bcb0f..7b3f6a8d2ae55 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java
@@ -9,7 +9,6 @@

 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
-import org.elasticsearch.Version;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -57,8 +56,6 @@ public class SecuritySystemIndices {
     public static final String INTERNAL_SECURITY_PROFILE_INDEX_8 = ".security-profile-8";
     public static final String SECURITY_PROFILE_ALIAS = ".security-profile";

-    public static final Version VERSION_SECURITY_PROFILE_ORIGIN = Version.V_8_3_0;
-    public static final NodeFeature SECURITY_PROFILE_ORIGIN_FEATURE = new NodeFeature("security.security_profile_origin");
     public static final NodeFeature SECURITY_MIGRATION_FRAMEWORK = new NodeFeature("security.migration_framework");
     public static final NodeFeature SECURITY_ROLES_METADATA_FLATTENED = new NodeFeature("security.roles_metadata_flattened");
     public static final NodeFeature SECURITY_ROLE_MAPPING_CLEANUP = new NodeFeature("security.role_mapping_cleanup");
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportInvalidateTokenActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportInvalidateTokenActionTests.java
index 6b9594c1c68ea..87651a96d75a6 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportInvalidateTokenActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportInvalidateTokenActionTests.java
@@ -82,8 +82,9 @@ public void testInvalidateTokensWhenIndexUnavailable() throws Exception {
         when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(false);
         when(securityIndex.indexExists()).thenReturn(true);
         when(securityIndex.defensiveCopy()).thenReturn(securityIndex);
-        when(securityIndex.getUnavailableReason(SecurityIndexManager.Availability.PRIMARY_SHARDS))
-            .thenReturn(new ElasticsearchException("simulated"));
+        when(securityIndex.getUnavailableReason(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(
+            new ElasticsearchException("simulated")
+        );
         final TokenService tokenService = new TokenService(
             SETTINGS,
             Clock.systemUTC(),
@@ -102,8 +103,11 @@ public void testInvalidateTokensWhenIndexUnavailable() throws Exception {
         Tuple newTokenBytes = tokenService.getRandomTokenBytes(true);
         InvalidateTokenRequest request = new InvalidateTokenRequest(
-            tokenService.prependVersionAndEncodeAccessToken(TransportVersion.current(), newTokenBytes.v1()),
-            ACCESS_TOKEN.getValue(), null, null);
+            tokenService.prependVersionAndEncodeAccessToken(TransportVersion.current(), newTokenBytes.v1()),
+            ACCESS_TOKEN.getValue(),
+            null,
+            null
+        );
         PlainActionFuture accessTokenfuture = new PlainActionFuture<>();
         action.doExecute(null, request, accessTokenfuture);
         ElasticsearchSecurityException ese = expectThrows(ElasticsearchSecurityException.class, accessTokenfuture::actionGet);
@@ -148,8 +152,11 @@ public void testInvalidateTokensWhenIndexClosed() throws Exception {
         Tuple newTokenBytes = tokenService.getRandomTokenBytes(true);
         InvalidateTokenRequest request = new InvalidateTokenRequest(
-            tokenService.prependVersionAndEncodeAccessToken(TransportVersion.current(), newTokenBytes.v1()),
-            ACCESS_TOKEN.getValue(), null, null);
+            tokenService.prependVersionAndEncodeAccessToken(TransportVersion.current(), newTokenBytes.v1()),
+            ACCESS_TOKEN.getValue(),
+            null,
+            null
+        );
         PlainActionFuture accessTokenfuture = new PlainActionFuture<>();
         action.doExecute(null, request, accessTokenfuture);
         ElasticsearchSecurityException ese = expectThrows(ElasticsearchSecurityException.class, accessTokenfuture::actionGet);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java
index 5adc1e351931d..3be40c280874d 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java
@@ -2614,7 +2614,7 @@ public void testAuthenticationSuccessRest() throws Exception {
         checkedFields.put(LoggingAuditTrail.REQUEST_METHOD_FIELD_NAME, request.method().toString());
         checkedFields.put(LoggingAuditTrail.REQUEST_ID_FIELD_NAME, requestId);
         checkedFields.put(LoggingAuditTrail.URL_PATH_FIELD_NAME, "_uri");
-        if (includeRequestBody && Strings.hasLength(request.content())) {
+        if (includeRequestBody && request.hasContent()) {
             checkedFields.put(LoggingAuditTrail.REQUEST_BODY_FIELD_NAME, request.content().utf8ToString());
         }
         if (params.isEmpty() == false) {
@@ -2643,8 +2643,8 @@ public void testAuthenticationSuccessRest() throws Exception {
         checkedFields.put(LoggingAuditTrail.REQUEST_METHOD_FIELD_NAME, request.method().toString());
         checkedFields.put(LoggingAuditTrail.REQUEST_ID_FIELD_NAME, requestId);
         checkedFields.put(LoggingAuditTrail.URL_PATH_FIELD_NAME, "_uri");
-        if (includeRequestBody && Strings.hasLength(request.content())) {
-            checkedFields.put(LoggingAuditTrail.REQUEST_BODY_FIELD_NAME, request.getHttpRequest().body().asFull().bytes().utf8ToString());
+        if (includeRequestBody && request.hasContent()) {
+            checkedFields.put(LoggingAuditTrail.REQUEST_BODY_FIELD_NAME, request.content().utf8ToString());
         }
         if (params.isEmpty() == false) {
             checkedFields.put(LoggingAuditTrail.URL_QUERY_FIELD_NAME, "foo=bar&evac=true");
@@ -2672,7 +2672,7 @@ public void testAuthenticationSuccessRest() throws Exception {
         checkedFields.put(LoggingAuditTrail.REQUEST_METHOD_FIELD_NAME, request.method().toString());
         checkedFields.put(LoggingAuditTrail.REQUEST_ID_FIELD_NAME, requestId);
         checkedFields.put(LoggingAuditTrail.URL_PATH_FIELD_NAME, "_uri");
-        if (includeRequestBody && Strings.hasLength(request.content().utf8ToString())) {
+        if (includeRequestBody && request.hasContent()) {
            checkedFields.put(LoggingAuditTrail.REQUEST_BODY_FIELD_NAME, request.content().utf8ToString());
         }
         if (params.isEmpty() == false) {
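All three assertions converge on the same guard. The pattern, as it now reads (RestRequest API taken from the hunks above):

    // Record the body only when one is actually present; hasContent() avoids
    // materialising the bytes (and a String) just to test for emptiness.
    if (includeRequestBody && request.hasContent()) {
        checkedFields.put(LoggingAuditTrail.REQUEST_BODY_FIELD_NAME, request.content().utf8ToString());
    }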
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java
index cd6c88cf525af..7b66a95609b05 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java
@@ -2041,8 +2041,14 @@ public void testExpiredToken() throws Exception {
                 .user(new User("creator"))
                 .realmRef(new RealmRef("test", "test", "test"))
                 .build(false);
-            tokenService.createOAuth2Tokens(newTokenBytes.v1(), newTokenBytes.v2(), expected, originatingAuth, Collections.emptyMap(),
-                tokenFuture);
+            tokenService.createOAuth2Tokens(
+                newTokenBytes.v1(),
+                newTokenBytes.v2(),
+                expected,
+                originatingAuth,
+                Collections.emptyMap(),
+                tokenFuture
+            );
         }
         String token = tokenFuture.get().getAccessToken();
         mockGetTokenFromAccessTokenBytes(tokenService, newTokenBytes.v1(), expected, Map.of(), true, null, client);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java
index 7219561dcf9df..aed39b24f217d 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java
@@ -76,11 +76,13 @@ public void init() throws Exception {
     }

     public void testAuthenticateThrowsOnUnsupportedMinVersions() throws IOException {
-        when(clusterService.state().getMinTransportVersion()).thenReturn(TransportVersionUtils.randomVersionBetween(
+        when(clusterService.state().getMinTransportVersion()).thenReturn(
+            TransportVersionUtils.randomVersionBetween(
                 random(),
                 TransportVersions.MINIMUM_COMPATIBLE,
                 TransportVersionUtils.getPreviousVersion(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY)
-        ));
+            )
+        );
         final var authcContext = mock(Authenticator.Context.class, Mockito.RETURNS_DEEP_STUBS);
         when(authcContext.getThreadContext()).thenReturn(threadContext);
         final var crossClusterAccessHeaders = new CrossClusterAccessHeaders(
@@ -93,12 +95,14 @@ public void testAuthenticateThrowsOnUnsupportedMinVersions() throws IOException
         when(auditableRequest.exceptionProcessingRequest(any(), any())).thenAnswer(
             i -> new ElasticsearchSecurityException("potato", (Exception) i.getArguments()[0])
         );
-        doAnswer(invocationOnMock -> new Authenticator.Context(
+        doAnswer(
+            invocationOnMock -> new Authenticator.Context(
                 threadContext,
                 auditableRequest,
                 mock(Realms.class),
                 (AuthenticationToken) invocationOnMock.getArguments()[2]
-        )).when(authenticationService).newContext(anyString(), any(), any());
+            )
+        ).when(authenticationService).newContext(anyString(), any(), any());

         final PlainActionFuture future = new PlainActionFuture<>();
         crossClusterAccessAuthenticationService.authenticate("action", mock(TransportRequest.class), future);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java
index b35a2f8ccc4d3..02f397c23d3bd 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java
@@ -326,7 +326,7 @@ public void testRecordingFailedAuthenticationMetric() {
             @SuppressWarnings("unchecked")
             final ActionListener> listener = (ActionListener>) invocationOnMock
                 .getArguments()[1];
-            listener.onResponse(AuthenticationResult.unsuccessful("unsuccessful realms authentication", null));
+            listener.onResponse(AuthenticationResult.unsuccessful("unsuccessful realms authentication", null));
             return null;
         }).when(unsuccessfulRealm).authenticate(eq(authenticationToken), any());
@@ -337,7 +337,7 @@ public void testRecordingFailedAuthenticationMetric() {

         final PlainActionFuture> future = new PlainActionFuture<>();
         realmsAuthenticator.authenticate(context, future);
-        var e = expectThrows(ElasticsearchSecurityException.class, () -> future.actionGet());
+        var e = expectThrows(ElasticsearchSecurityException.class, () -> future.actionGet());
         assertThat(e, sameInstance(exception));

         assertSingleFailedAuthMetric(
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java
index 73a45dc20ac42..ed3949450cb9f 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java
@@ -363,9 +363,7 @@ public void testGetPrivilegesWillOnlyWaitOnUnavailableShardException() {

     public void testGetPrivilegesFailsAfterWaitOnUnavailableShardException() {
         when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(false).thenReturn(false);
-        when(securityIndex.getUnavailableReason(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(
-            unavailableShardsException()
-        );
+        when(securityIndex.getUnavailableReason(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(unavailableShardsException());
         doAnswer(invocation -> {
             @SuppressWarnings("unchecked")
             final var listener = (ActionListener) invocation.getArguments()[0];
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java
index f076dc24e5d5b..6da1ddb61f11f 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java
@@ -36,9 +36,6 @@
 import org.elasticsearch.action.update.UpdateRequestBuilder;
 import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.client.internal.Client;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.node.DiscoveryNodes;
-import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -48,7 +45,6 @@
 import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.core.Tuple;
-import org.elasticsearch.features.FeatureService;
 import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.index.get.GetResult;
 import org.elasticsearch.index.query.BoolQueryBuilder;
@@ -88,7 +84,6 @@
 import org.elasticsearch.xpack.security.authc.Realms;
 import org.elasticsearch.xpack.security.profile.ProfileDocument.ProfileDocumentUser;
 import org.elasticsearch.xpack.security.support.SecurityIndexManager;
-import org.elasticsearch.xpack.security.support.SecuritySystemIndices;
 import org.elasticsearch.xpack.security.test.SecurityMocks;
 import org.hamcrest.Matchers;
 import org.junit.After;
@@ -115,7 +110,6 @@
 import static java.util.Collections.emptyMap;
 import static org.elasticsearch.common.util.concurrent.ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME;
 import static org.elasticsearch.test.ActionListenerUtils.anyActionListener;
-import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN;
 import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_PROFILE_ORIGIN;
 import static org.elasticsearch.xpack.core.security.support.Validation.VALID_NAME_CHARS;
 import static org.elasticsearch.xpack.security.Security.SECURITY_CRYPTO_THREAD_POOL_NAME;
@@ -187,7 +181,6 @@ public class ProfileServiceTests extends ESTestCase {
     private SecurityIndexManager profileIndex;
     private ProfileService profileService;
     Function realmRefLookup;
-    private boolean useProfileOrigin;

     @Before
     public void prepare() {
@@ -208,29 +201,11 @@ public void prepare() {
         when(client.threadPool()).thenReturn(threadPool);
         when(client.prepareSearch(SECURITY_PROFILE_ALIAS)).thenReturn(new SearchRequestBuilder(client).setIndices(SECURITY_PROFILE_ALIAS));
         this.profileIndex = SecurityMocks.mockSecurityIndexManager(SECURITY_PROFILE_ALIAS);
-        final ClusterService clusterService = mock(ClusterService.class);
-        final ClusterState clusterState = mock(ClusterState.class);
-        when(clusterService.state()).thenReturn(clusterState);
-        final DiscoveryNodes discoveryNodes = mock(DiscoveryNodes.class);
-        when(clusterState.nodes()).thenReturn(discoveryNodes);
-        useProfileOrigin = randomBoolean();
-        FeatureService featureService = mock(FeatureService.class);
-        when(featureService.clusterHasFeature(any(), eq(SecuritySystemIndices.SECURITY_PROFILE_ORIGIN_FEATURE))).thenReturn(
-            useProfileOrigin
-        );
         realmRefLookup = realmIdentifier -> null;
         Realms realms = mock(Realms.class);
         when(realms.getDomainConfig(anyString())).then(args -> new DomainConfig(args.getArgument(0), Set.of(), false, null));
         when(realms.getRealmRef(any(RealmConfig.RealmIdentifier.class))).then(args -> realmRefLookup.apply(args.getArgument(0)));
-        this.profileService = new ProfileService(
-            Settings.EMPTY,
-            Clock.systemUTC(),
-            client,
-            profileIndex,
-            clusterService,
-            featureService,
-            realms
-        );
+        this.profileService = new ProfileService(Settings.EMPTY, Clock.systemUTC(), client, profileIndex, realms);
     }

     @After
@@ -331,10 +306,7 @@ public void testGetProfileSubjectsWithMissingUids() throws Exception {
         final Collection allProfileUids = randomList(1, 5, () -> randomAlphaOfLength(20));
         final Collection missingProfileUids = randomSubsetOf(allProfileUids);
         doAnswer(invocation -> {
-            assertThat(
-                threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME),
-                equalTo(useProfileOrigin ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN)
-            );
+            assertThat(threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), equalTo(SECURITY_PROFILE_ORIGIN));
             final MultiGetRequest multiGetRequest = (MultiGetRequest) invocation.getArguments()[1];
             List responses = new ArrayList<>();
             for (MultiGetRequest.Item item : multiGetRequest.getItems()) {
@@ -397,10 +369,7 @@ public void testGetProfileSubjectsWithMissingUids() throws Exception {
     public void testGetProfileSubjectWithFailures() throws Exception {
         final ElasticsearchException mGetException = new ElasticsearchException("mget Exception");
         doAnswer(invocation -> {
-            assertThat(
-                threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME),
-                equalTo(useProfileOrigin ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN)
-            );
+            assertThat(threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), equalTo(SECURITY_PROFILE_ORIGIN));
             final ActionListener listener = (ActionListener) invocation.getArguments()[2];
             listener.onFailure(mGetException);
             return null;
@@ -413,10 +382,7 @@ public void testGetProfileSubjectWithFailures() throws Exception {
         final Collection errorProfileUids = randomSubsetOf(allProfileUids);
         final Collection missingProfileUids = Sets.difference(Set.copyOf(allProfileUids), Set.copyOf(errorProfileUids));
         doAnswer(invocation -> {
-            assertThat(
-                threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME),
-                equalTo(useProfileOrigin ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN)
-            );
+            assertThat(threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), equalTo(SECURITY_PROFILE_ORIGIN));
             final MultiGetRequest multiGetRequest = (MultiGetRequest) invocation.getArguments()[1];
             List responses = new ArrayList<>();
             for (MultiGetRequest.Item item : multiGetRequest.getItems()) {
@@ -504,15 +470,7 @@ public void testLiteralUsernameWillThrowOnDuplicate() throws IOException {
         final Subject subject = new Subject(AuthenticationTestHelper.randomUser(), AuthenticationTestHelper.randomRealmRef(true));
         Realms realms = mock(Realms.class);
         when(realms.getDomainConfig(anyString())).then(args -> new DomainConfig(args.getArgument(0), Set.of(), true, "suffix"));
-        final ProfileService service = new ProfileService(
-            Settings.EMPTY,
-            Clock.systemUTC(),
-            client,
-            profileIndex,
-            mock(ClusterService.class),
-            mock(FeatureService.class),
-            realms
-        );
+        final ProfileService service = new ProfileService(Settings.EMPTY, Clock.systemUTC(), client, profileIndex, realms);
         final PlainActionFuture future = new PlainActionFuture<>();
         service.maybeIncrementDifferentiatorAndCreateNewProfile(
             subject,
@@ -593,10 +551,7 @@ public void testBuildSearchRequest() {
     public void testSecurityProfileOrigin() {
         // Activate profile
         doAnswer(invocation -> {
-            assertThat(
-                threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME),
-                equalTo(useProfileOrigin ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN)
-            );
+            assertThat(threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), equalTo(SECURITY_PROFILE_ORIGIN));
             @SuppressWarnings("unchecked")
             final ActionListener listener = (ActionListener) invocation.getArguments()[2];
             var resp = new MultiSearchResponse(
@@ -616,10 +571,7 @@ public void testSecurityProfileOrigin() {

         final RuntimeException expectedException = new RuntimeException("expected");
         doAnswer(invocation -> {
-            assertThat(
-                threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME),
-                equalTo(useProfileOrigin ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN)
-            );
+            assertThat(threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), equalTo(SECURITY_PROFILE_ORIGIN));
             final ActionListener listener = (ActionListener) invocation.getArguments()[2];
             listener.onFailure(expectedException);
             return null;
@@ -632,10 +584,7 @@ public void testSecurityProfileOrigin() {

         // Update
         doAnswer(invocation -> {
-            assertThat(
-                threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME),
-                equalTo(useProfileOrigin ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN)
-            );
+            assertThat(threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), equalTo(SECURITY_PROFILE_ORIGIN));
             final ActionListener listener = (ActionListener) invocation.getArguments()[2];
             listener.onFailure(expectedException);
             return null;
@@ -647,10 +596,7 @@ public void testSecurityProfileOrigin() {

         // Suggest
         doAnswer(invocation -> {
-            assertThat(
-                threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME),
-                equalTo(useProfileOrigin ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN)
-            );
+            assertThat(threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), equalTo(SECURITY_PROFILE_ORIGIN));
             final ActionListener listener = (ActionListener) invocation.getArguments()[2];
             listener.onFailure(expectedException);
             return null;
@@ -675,17 +621,7 @@ public void testActivateProfileWithDifferentUidFormats() throws IOException {
                 return new DomainConfig(domainName, Set.of(), true, "suffix");
             }
         });
-        final ProfileService service = spy(
-            new ProfileService(
-                Settings.EMPTY,
-                Clock.systemUTC(),
-                client,
-                profileIndex,
-                mock(ClusterService.class),
-                mock(FeatureService.class),
-                realms
-            )
-        );
+        final ProfileService service = spy(new ProfileService(Settings.EMPTY, Clock.systemUTC(), client, profileIndex, realms));

         doAnswer(invocation -> {
             @SuppressWarnings("unchecked")
@@ -1098,10 +1034,7 @@ public void testProfileSearchForApiKeyOwnerWithoutDomain() throws Exception {
         MultiSearchResponse emptyMultiSearchResponse = new MultiSearchResponse(responseItems, randomNonNegativeLong());
         try {
             doAnswer(invocation -> {
-                assertThat(
-                    threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME),
-                    equalTo(useProfileOrigin ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN)
-                );
+                assertThat(threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), equalTo(SECURITY_PROFILE_ORIGIN));
                 MultiSearchRequest multiSearchRequest = (MultiSearchRequest) invocation.getArguments()[1];
                 assertThat(multiSearchRequest.requests(), iterableWithSize(1));
                 assertThat(multiSearchRequest.requests().get(0).source().query(), instanceOf(BoolQueryBuilder.class));
@@ -1153,10 +1086,7 @@ public void testProfileSearchForApiKeyOwnerWithDomain() throws Exception {
         MultiSearchResponse emptyMultiSearchResponse = new MultiSearchResponse(responseItems, randomNonNegativeLong());
         try {
             doAnswer(invocation -> {
-                assertThat(
-                    threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME),
-                    equalTo(useProfileOrigin ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN)
-                );
+                assertThat(threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), equalTo(SECURITY_PROFILE_ORIGIN));
                 MultiSearchRequest multiSearchRequest = (MultiSearchRequest) invocation.getArguments()[1];
                 assertThat(multiSearchRequest.requests(), iterableWithSize(1));
                 assertThat(multiSearchRequest.requests().get(0).source().query(), instanceOf(BoolQueryBuilder.class));
@@ -1218,10 +1148,7 @@ public void testProfileSearchForOwnerOfMultipleApiKeys() throws Exception {
         MultiSearchResponse emptyMultiSearchResponse = new MultiSearchResponse(responseItems, randomNonNegativeLong());
         try {
             doAnswer(invocation -> {
-                assertThat(
-                    threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME),
-                    equalTo(useProfileOrigin ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN)
-                );
+                assertThat(threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), equalTo(SECURITY_PROFILE_ORIGIN));
                 MultiSearchRequest multiSearchRequest = (MultiSearchRequest) invocation.getArguments()[1];
                 // a single search request for a single owner of multiple keys
                 assertThat(multiSearchRequest.requests(), iterableWithSize(1));
@@ -1277,10 +1204,7 @@ public void testProfileSearchErrorForApiKeyOwner() {
         MultiSearchResponse multiSearchResponseWithError = new MultiSearchResponse(responseItems, randomNonNegativeLong());
         try {
             doAnswer(invocation -> {
-                assertThat(
-                    threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME),
-                    equalTo(useProfileOrigin ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN)
-                );
+                assertThat(threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), equalTo(SECURITY_PROFILE_ORIGIN));
                 // a single search request for a single owner of multiple keys
                 MultiSearchRequest multiSearchRequest = (MultiSearchRequest) invocation.getArguments()[1];
                 // 2 search requests for the 2 Api key owners
@@ -1402,10 +1326,7 @@ private void mockMultiGetRequest(List sampleDocumentPar
     private void mockMultiGetRequest(List sampleDocumentParameters, Map errors) {
         doAnswer(invocation -> {
-            assertThat(
-                threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME),
-                equalTo(useProfileOrigin ? SECURITY_PROFILE_ORIGIN : SECURITY_ORIGIN)
-            );
+            assertThat(threadPool.getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME), equalTo(SECURITY_PROFILE_ORIGIN));
             final MultiGetRequest multiGetRequest = (MultiGetRequest) invocation.getArguments()[1];
             @SuppressWarnings("unchecked")
             final ActionListener listener = (ActionListener) invocation.getArguments()[2];
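The test setup shrinks accordingly: no ClusterState/DiscoveryNodes/FeatureService mocks and no useProfileOrigin coin flip, just the realms mock and the five-argument constructor. Condensed from the prepare() hunk above:

    // Post-change setup: the origin assertion is now a constant, SECURITY_PROFILE_ORIGIN.
    Realms realms = mock(Realms.class);
    when(realms.getDomainConfig(anyString()))
        .then(args -> new DomainConfig(args.getArgument(0), Set.of(), false, null));
    when(realms.getRealmRef(any(RealmConfig.RealmIdentifier.class)))
        .then(args -> realmRefLookup.apply(args.getArgument(0)));
    profileService = new ProfileService(Settings.EMPTY, Clock.systemUTC(), client, profileIndex, realms);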
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java
index 8509a6475aa71..5d4ea0f30cb15 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java
@@ -58,7 +58,7 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClien
             }
         };
         FakeRestRequest fakeRestRequest = new FakeRestRequest();
-        FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, true, securityEnabled ? 0 : 1);
+        FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), securityEnabled ? 0 : 1);

         try (var threadPool = createThreadPool()) {
             final var client = new NoOpNodeClient(threadPool);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandlerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandlerTests.java
index b734e602ec291..6ff05faf22d11 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandlerTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandlerTests.java
@@ -56,7 +56,7 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClien
             }
         };
         final var fakeRestRequest = new FakeRestRequest();
-        final var fakeRestChannel = new FakeRestChannel(fakeRestRequest, true, requiredSettingsEnabled ? 0 : 1);
+        final var fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), requiredSettingsEnabled ? 0 : 1);

         try (var threadPool = createThreadPool()) {
             final var client = new NoOpNodeClient(threadPool);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java
index 79dba637d53d0..9a05230d82ae6 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java
@@ -75,7 +75,7 @@ public void testCreateApiKeyApi() throws Exception {
         ).withParams(Collections.singletonMap("refresh", randomFrom("false", "true", "wait_for"))).build();

         final SetOnce responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java
index a47855731b37a..812354986d5bc 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java
@@ -115,7 +115,7 @@ public void testLicenseEnforcement() throws Exception {
             }
         }"""), XContentType.JSON).build();
         final SetOnce responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java
index c65634a76b532..d88a217cd0949 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java
@@ -91,7 +91,7 @@ public void testGetApiKey() throws Exception {
         final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams(params).build();

         final SetOnce responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
@@ -159,7 +159,7 @@ public void testGetApiKeyWithProfileUid() throws Exception {
         }
         final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams(param).build();
         final SetOnce responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
@@ -224,7 +224,7 @@ public void testGetApiKeyOwnedByCurrentAuthenticatedUser() throws Exception {
         final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams(param).build();

         final SetOnce responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java
index 2cb1b6a66b02b..ac472378d4874 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java
@@ -77,7 +77,7 @@ public void testInvalidateApiKey() throws Exception {
         ).build();

         final SetOnce responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
@@ -144,7 +144,7 @@ public void testInvalidateApiKeyOwnedByCurrentAuthenticatedUser() throws Excepti
         ).build();

         final SetOnce responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java
index 7005b5158e626..d5aa249b1d0f5 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java
@@ -110,7 +110,7 @@ public void testQueryParsing() throws Exception {
         ).build();

         final SetOnce responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
@@ -184,7 +184,7 @@ public void testAggsAndAggregationsTogether() {
             XContentType.JSON
         ).build();
         final SetOnce responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
@@ -230,7 +230,7 @@ public void testParsingSearchParameters() throws Exception {
         ).build();

         final SetOnce responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
@@ -290,7 +290,7 @@ public void testQueryApiKeyWithProfileUid() throws Exception {
         }
         FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams(param).build();
         SetOnce responseSetOnce = new SetOnce<>();
-        RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
+        RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyActionTests.java
index 879e1ac8ad157..f2fe28b2a936f 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyActionTests.java
@@ -89,15 +89,12 @@ public void testLicenseEnforcement() throws Exception {

         // Disallow by license
         when(licenseState.isAllowed(Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE)).thenReturn(false);

-        final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent(
-            new BytesArray("""
-                {
-                  "metadata": {}
-                }"""),
-            XContentType.JSON
-        ).withParams(Map.of("id", randomAlphaOfLength(10))).build();
+        final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent(new BytesArray("""
+            {
+              "metadata": {}
+            }"""), XContentType.JSON).withParams(Map.of("id", randomAlphaOfLength(10))).build();
         final SetOnce responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
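A recurring change in this and the following test files: the boolean passed to FakeRestChannel and AbstractRestChannel, previously hard-coded to true, is now drawn at random. That flag controls detailed error reporting on the channel (my gloss; the tests pass it positionally), and the handlers under test should behave identically either way:

    // The assertions must hold regardless of the channel's error-detail setting.
    final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
        @Override
        public void sendResponse(RestResponse restResponse) {
            responseSetOnce.set(restResponse);
        }
    };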
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenActionTests.java
index bd665560f425f..2ac33a780313e 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenActionTests.java
@@ -43,7 +43,7 @@ public class RestGetTokenActionTests extends ESTestCase {
     public void testListenerHandlesExceptionProperly() {
         FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build();
         final SetOnce responseSetOnce = new SetOnce<>();
-        RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
+        RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
@@ -67,7 +67,7 @@ public void sendResponse(RestResponse restResponse) {
     public void testSendResponse() {
         FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build();
         final SetOnce responseSetOnce = new SetOnce<>();
-        RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
+        RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
@@ -114,7 +114,7 @@ public void sendResponse(RestResponse restResponse) {
     public void testSendResponseKerberosError() {
         FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build();
         final SetOnce responseSetOnce = new SetOnce<>();
-        RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
+        RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java
index 38405a2167808..4a593eeb24ac6 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java
@@ -73,7 +73,7 @@ public void testQueryParsing() throws Exception {
         ).build();

         final SetOnce responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
@@ -132,7 +132,7 @@ public void testParsingSearchParameters() throws Exception {
         ).build();

         final SetOnce responseSetOnce = new SetOnce<>();
-        final RestChannel restChannel = new AbstractRestChannel(restRequest, true) {
+        final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) {
             @Override
             public void sendResponse(RestResponse restResponse) {
                 responseSetOnce.set(restResponse);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java
index c5c5e14934408..e381663d4174e 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java
@@ -571,7 +571,11 @@ public void testClientChannelUsesSeparateSslConfigurationForRemoteCluster() thro
         final ConnectTransportException e = openConnectionExpectFailure(qcService, node, connectionProfile);
         assertThat(
             e.getRootCause().getMessage(),
-            anyOf(containsString("unable to find valid certification path"), containsString("Unable to find certificate chain"))
+            anyOf(
+                containsString("unable to find valid certification path"),
+                containsString("Unable to find certificate chain"),
+                containsString("Unable to construct a valid chain")
+            )
         );
     }
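This assertion is loosened in step with the message change seen in SslClientAuthenticationTests earlier in the patch: rather than pinning one security provider's wording of a trust-path failure, the test accepts any of the phrasings observed across JDK and BCFIPS versions. The tolerant shape, using the Hamcrest matchers already imported by the test:

    // Trust-path failures carry provider-specific text; match any known variant.
    assertThat(
        e.getRootCause().getMessage(),
        anyOf(
            containsString("unable to find valid certification path"),
            containsString("Unable to find certificate chain"),
            containsString("Unable to construct a valid chain")
        )
    );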
diff --git a/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle b/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle
index 45bca88600495..60b0b372ba14c 100644
--- a/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle
+++ b/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle
@@ -1,4 +1,10 @@
-import org.elasticsearch.gradle.internal.info.BuildParams
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
 import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask

 apply plugin: 'elasticsearch.internal-java-rest-test'
@@ -13,7 +19,7 @@ dependencies {
   javaRestTestImplementation project(':x-pack:qa')
 }

-BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName ->
+buildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName ->
   tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) {
     usesBwcDistribution(bwcVersion)
     systemProperty("tests.old_cluster_version", bwcVersion)
diff --git a/x-pack/plugin/shutdown/qa/rolling-upgrade/build.gradle b/x-pack/plugin/shutdown/qa/rolling-upgrade/build.gradle
index 32cab39f665d3..17996ce82a453 100644
--- a/x-pack/plugin/shutdown/qa/rolling-upgrade/build.gradle
+++ b/x-pack/plugin/shutdown/qa/rolling-upgrade/build.gradle
@@ -6,7 +6,7 @@
  */

 import org.elasticsearch.gradle.VersionProperties
-import org.elasticsearch.gradle.internal.info.BuildParams
+
 import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask

 apply plugin: 'elasticsearch.internal-testclusters'
@@ -36,7 +36,7 @@ tasks.register("copyTestNodeKeyMaterial", Copy) {
   into outputDir
 }

-BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
+buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
   String oldVersion = bwcVersion.toString()

   // SearchableSnapshotsRollingUpgradeIT uses a specific repository to not interfere with other tests
diff --git a/x-pack/plugin/slm/build.gradle b/x-pack/plugin/slm/build.gradle
index d9511fe67e8e0..b54e31315f709 100644
--- a/x-pack/plugin/slm/build.gradle
+++ b/x-pack/plugin/slm/build.gradle
@@ -1,4 +1,9 @@
-import org.elasticsearch.gradle.internal.info.BuildParams
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */

 apply plugin: 'elasticsearch.internal-es-plugin'
 apply plugin: 'elasticsearch.internal-cluster-test'
diff --git a/x-pack/plugin/slm/qa/multi-node/build.gradle b/x-pack/plugin/slm/qa/multi-node/build.gradle
index 1f4b0c3b10c30..afbae8932e292 100644
--- a/x-pack/plugin/slm/qa/multi-node/build.gradle
+++ b/x-pack/plugin/slm/qa/multi-node/build.gradle
@@ -1,4 +1,10 @@
-import org.elasticsearch.gradle.internal.info.BuildParams
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
 import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE

 apply plugin: 'elasticsearch.legacy-java-rest-test'
@@ -31,7 +37,7 @@ testClusters.configureEach {
   setting 'logger.org.elasticsearch.xpack.slm', 'TRACE'
 }

-if (BuildParams.inFipsJvm){
+if (buildParams.inFipsJvm){
   // Test clusters run with security disabled
   tasks.named("javaRestTest").configure{enabled = false }
 }
diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java
index 192807d667abb..cc01d5b101106 100644
--- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java
+++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java
@@ -130,7 +130,6 @@ public Collection createComponents(PluginServices services) {
         SnapshotLifecycleTemplateRegistry templateRegistry = new SnapshotLifecycleTemplateRegistry(
             settings,
             clusterService,
-            services.featureService(),
             threadPool,
             client,
             services.xContentRegistry()
diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java
index 96b962f70a1b6..274dec75865a8 100644
--- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java
+++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleFeatures.java
@@ -7,12 +7,9 @@

 package org.elasticsearch.xpack.slm;

-import org.elasticsearch.Version;
 import org.elasticsearch.features.FeatureSpecification;
 import org.elasticsearch.features.NodeFeature;
-import org.elasticsearch.xpack.slm.history.SnapshotLifecycleTemplateRegistry;

-import java.util.Map;
 import java.util.Set;

 public class SnapshotLifecycleFeatures implements FeatureSpecification {
@@ -20,9 +17,4 @@ public class SnapshotLifecycleFeatures implements FeatureSpecification {
     public Set getFeatures() {
         return Set.of(SnapshotLifecycleService.INTERVAL_SCHEDULE);
     }
-
-    @Override
-    public Map getHistoricalFeatures() {
-        return Map.of(SnapshotLifecycleTemplateRegistry.MANAGED_BY_DATA_STREAM_LIFECYCLE, Version.V_8_12_0);
-    }
 }
diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistry.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistry.java
index f40ea5a56463a..31c624df67813 100644
--- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistry.java
+++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistry.java
@@ -8,13 +8,10 @@
 package org.elasticsearch.xpack.slm.history;

 import org.elasticsearch.client.internal.Client;
-import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.features.FeatureService;
-import org.elasticsearch.features.NodeFeature;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.xcontent.NamedXContentRegistry;
 import org.elasticsearch.xpack.core.ilm.IndexLifecycleMetadata;
@@ -47,13 +44,11 @@ public class SnapshotLifecycleTemplateRegistry extends IndexTemplateRegistry {
     // version 6: manage by data stream lifecycle
     // version 7: version the index template name so we can upgrade existing deployments
     public static final int INDEX_TEMPLATE_VERSION = 7;
-    public static final NodeFeature MANAGED_BY_DATA_STREAM_LIFECYCLE = new NodeFeature("slm-history-managed-by-dsl");

     public static final String SLM_TEMPLATE_VERSION_VARIABLE = "xpack.slm.template.version";
     public static final String SLM_TEMPLATE_NAME = ".slm-history-" + INDEX_TEMPLATE_VERSION;

     public static final String SLM_POLICY_NAME = "slm-history-ilm-policy";
-    private final FeatureService featureService;

     @Override
     protected boolean requiresMasterNode() {
@@ -65,13 +60,11 @@ protected boolean requiresMasterNode() {
     public SnapshotLifecycleTemplateRegistry(
         Settings nodeSettings,
         ClusterService clusterService,
-        FeatureService featureService,
         ThreadPool threadPool,
         Client client,
         NamedXContentRegistry xContentRegistry
     ) {
         super(nodeSettings, clusterService, threadPool, client, xContentRegistry);
-        this.featureService = featureService;
         slmHistoryEnabled = SLM_HISTORY_INDEX_ENABLED_SETTING.get(nodeSettings);
     }

@@ -122,9 +115,4 @@ public boolean validate(ClusterState state) {
         boolean allPoliciesPresent = maybePolicies.map(policies -> policies.keySet().containsAll(policyNames)).orElse(false);
         return allTemplatesPresent && allPoliciesPresent;
     }
-
-    @Override
-    protected boolean isClusterReady(ClusterChangedEvent event) {
-        return featureService.clusterHasFeature(event.state(), MANAGED_BY_DATA_STREAM_LIFECYCLE);
-    }
 }
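SnapshotLifecycleTemplateRegistry follows the same recipe as the security changes above: slm-history-managed-by-dsl was a historical feature (mapped to 8.12.0 per the SnapshotLifecycleFeatures hunk), so the registry no longer waits on it before installing its templates. The deleted readiness gate looked like this:

    // Removed override: template installation used to wait until every node
    // advertised the MANAGED_BY_DATA_STREAM_LIFECYCLE feature.
    @Override
    protected boolean isClusterReady(ClusterChangedEvent event) {
        return featureService.clusterHasFeature(event.state(), MANAGED_BY_DATA_STREAM_LIFECYCLE);
    }

With the override gone, the base IndexTemplateRegistry behaviour applies, presumably "always ready", which is safe once every node in a supported cluster is at least 8.12.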
b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistry.java @@ -8,13 +8,10 @@ package org.elasticsearch.xpack.slm.history; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.ilm.IndexLifecycleMetadata; @@ -47,13 +44,11 @@ public class SnapshotLifecycleTemplateRegistry extends IndexTemplateRegistry { // version 6: manage by data stream lifecycle // version 7: version the index template name so we can upgrade existing deployments public static final int INDEX_TEMPLATE_VERSION = 7; - public static final NodeFeature MANAGED_BY_DATA_STREAM_LIFECYCLE = new NodeFeature("slm-history-managed-by-dsl"); public static final String SLM_TEMPLATE_VERSION_VARIABLE = "xpack.slm.template.version"; public static final String SLM_TEMPLATE_NAME = ".slm-history-" + INDEX_TEMPLATE_VERSION; public static final String SLM_POLICY_NAME = "slm-history-ilm-policy"; - private final FeatureService featureService; @Override protected boolean requiresMasterNode() { @@ -65,13 +60,11 @@ protected boolean requiresMasterNode() { public SnapshotLifecycleTemplateRegistry( Settings nodeSettings, ClusterService clusterService, - FeatureService featureService, ThreadPool threadPool, Client client, NamedXContentRegistry xContentRegistry ) { super(nodeSettings, clusterService, threadPool, client, xContentRegistry); - this.featureService = featureService; slmHistoryEnabled = SLM_HISTORY_INDEX_ENABLED_SETTING.get(nodeSettings); } @@ -122,9 +115,4 @@ public boolean validate(ClusterState state) { boolean allPoliciesPresent = maybePolicies.map(policies -> policies.keySet().containsAll(policyNames)).orElse(false); return allTemplatesPresent && allPoliciesPresent; } - - @Override - protected boolean isClusterReady(ClusterChangedEvent event) { - return featureService.clusterHasFeature(event.state(), MANAGED_BY_DATA_STREAM_LIFECYCLE); - } } diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java index b7674a2d60bff..0ab3e99e1efc9 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java @@ -64,12 +64,12 @@ public void testNextExecutionTimeSchedule() { SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy( "id", "name", - "0 1 2 3 4 ? 2099", + "0 1 2 3 4 ? 
2049", "repo", Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY ); - assertThat(p.calculateNextExecution(-1, Clock.systemUTC()), equalTo(4078864860000L)); + assertThat(p.calculateNextExecution(-1, Clock.systemUTC()), equalTo(2501028060000L)); } public void testNextExecutionTimeInterval() { diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistryTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistryTests.java index d5a8faea1c0a0..8f25a4e70388e 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistryTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistryTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; @@ -48,7 +47,6 @@ import org.elasticsearch.xpack.core.ilm.TimeseriesLifecycleType; import org.elasticsearch.xpack.core.ilm.action.ILMActions; import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; -import org.elasticsearch.xpack.slm.SnapshotLifecycleFeatures; import org.junit.After; import org.junit.Before; @@ -102,14 +100,7 @@ public void createRegistryAndClient() { ) ); xContentRegistry = new NamedXContentRegistry(entries); - registry = new SnapshotLifecycleTemplateRegistry( - Settings.EMPTY, - clusterService, - new FeatureService(List.of(new SnapshotLifecycleFeatures())), - threadPool, - client, - xContentRegistry - ); + registry = new SnapshotLifecycleTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client, xContentRegistry); } @After @@ -124,7 +115,6 @@ public void testDisabledDoesNotAddTemplates() { SnapshotLifecycleTemplateRegistry disabledRegistry = new SnapshotLifecycleTemplateRegistry( settings, clusterService, - new FeatureService(List.of(new SnapshotLifecycleFeatures())), threadPool, client, xContentRegistry diff --git a/x-pack/plugin/snapshot-based-recoveries/qa/azure/build.gradle b/x-pack/plugin/snapshot-based-recoveries/qa/azure/build.gradle index 03426bdddce62..7f69d6b7e56eb 100644 --- a/x-pack/plugin/snapshot-based-recoveries/qa/azure/build.gradle +++ b/x-pack/plugin/snapshot-based-recoveries/qa/azure/build.gradle @@ -5,8 +5,6 @@ * 2.0. 
*/ -import org.elasticsearch.gradle.internal.info.BuildParams - apply plugin: 'elasticsearch.internal-java-rest-test' dependencies { @@ -39,7 +37,7 @@ tasks.named("javaRestTest").configure { systemProperty 'test.azure.container', azureContainer systemProperty 'test.azure.key', azureKey systemProperty 'test.azure.sas_token', azureSasToken - nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_snapshot_based_recoveries_tests_" + BuildParams.testSeed + nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_snapshot_based_recoveries_tests_" + buildParams.testSeed } tasks.register("azureThirdPartyTest") { diff --git a/x-pack/plugin/snapshot-based-recoveries/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/AzureSnapshotBasedRecoveryIT.java b/x-pack/plugin/snapshot-based-recoveries/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/AzureSnapshotBasedRecoveryIT.java index 591d4582d5905..8142b40166840 100644 --- a/x-pack/plugin/snapshot-based-recoveries/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/AzureSnapshotBasedRecoveryIT.java +++ b/x-pack/plugin/snapshot-based-recoveries/qa/azure/src/javaRestTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/AzureSnapshotBasedRecoveryIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.snapshotbasedrecoveries.recovery; import fixture.azure.AzureHttpFixture; +import fixture.azure.MockAzureBlobStore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Booleans; @@ -37,7 +38,8 @@ public class AzureSnapshotBasedRecoveryIT extends AbstractSnapshotBasedRecoveryR AZURE_TEST_CONTAINER, System.getProperty("test.azure.tenant_id"), System.getProperty("test.azure.client_id"), - AzureHttpFixture.sharedKeyForAccountPredicate(AZURE_TEST_ACCOUNT) + AzureHttpFixture.sharedKeyForAccountPredicate(AZURE_TEST_ACCOUNT), + MockAzureBlobStore.LeaseExpiryPredicate.NEVER_EXPIRE ); private static TestTrustStore trustStore = new TestTrustStore( diff --git a/x-pack/plugin/snapshot-based-recoveries/qa/gcs/build.gradle b/x-pack/plugin/snapshot-based-recoveries/qa/gcs/build.gradle index 267ed84aa45d4..4d39ca95312aa 100644 --- a/x-pack/plugin/snapshot-based-recoveries/qa/gcs/build.gradle +++ b/x-pack/plugin/snapshot-based-recoveries/qa/gcs/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ apply plugin: 'elasticsearch.internal-java-rest-test' @@ -28,7 +33,7 @@ if (!gcsServiceAccount && !gcsBucket && !gcsBasePath) { tasks.named("javaRestTest").configure { systemProperty 'test.google.fixture', Boolean.toString(useFixture) systemProperty 'test.gcs.bucket', gcsBucket - nonInputProperties.systemProperty 'test.gcs.base_path', gcsBasePath + "_snapshot_based_recoveries_tests" + BuildParams.testSeed + nonInputProperties.systemProperty 'test.gcs.base_path', gcsBasePath + "_snapshot_based_recoveries_tests" + buildParams.testSeed if (useFixture == false) { systemProperty 'test.google.account', serviceAccountFile diff --git a/x-pack/plugin/snapshot-based-recoveries/qa/s3/build.gradle b/x-pack/plugin/snapshot-based-recoveries/qa/s3/build.gradle index b669641363bd1..07909bf4cdbc1 100644 --- a/x-pack/plugin/snapshot-based-recoveries/qa/s3/build.gradle +++ b/x-pack/plugin/snapshot-based-recoveries/qa/s3/build.gradle @@ -5,8 +5,6 @@ * 2.0. */ -import org.elasticsearch.gradle.internal.info.BuildParams - apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.rest-resources' @@ -47,7 +45,7 @@ tasks.named("javaRestTest").configure { systemProperty("s3AccessKey", s3AccessKey) systemProperty("s3SecretKey", s3SecretKey) nonInputProperties.systemProperty 'test.s3.base_path', - s3BasePath ? s3BasePath + "_snapshot_based_recoveries_tests" + BuildParams.testSeed : 'base_path_integration_tests' + s3BasePath ? s3BasePath + "_snapshot_based_recoveries_tests" + buildParams.testSeed : 'base_path_integration_tests' } tasks.register("s3ThirdPartyTest") { diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/azure/build.gradle b/x-pack/plugin/snapshot-repo-test-kit/qa/azure/build.gradle index e304b2ff5c263..5f195e983d191 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/azure/build.gradle +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/azure/build.gradle @@ -5,8 +5,6 @@ * 2.0. 
*/ - -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.RestIntegTestTask apply plugin: 'elasticsearch.internal-java-rest-test' @@ -46,7 +44,7 @@ tasks.named("javaRestTest") { systemProperty 'test.azure.sas_token', azureSasToken systemProperty 'test.azure.tenant_id', azureTenantId systemProperty 'test.azure.client_id', azureClientId - nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_repository_test_kit_tests_" + BuildParams.testSeed + nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_repository_test_kit_tests_" + buildParams.testSeed } tasks.register("azureThirdPartyTest") { @@ -61,7 +59,7 @@ tasks.register("managedIdentityJavaRestTest", RestIntegTestTask) { systemProperty 'test.azure.container', azureContainer // omitting key and sas_token so that we use a bearer token from the metadata service // omitting client_id and tenant_id so that we use a bearer token from the metadata service, not from workload identity - nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_repository_test_kit_tests_" + BuildParams.testSeed + nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_repository_test_kit_tests_" + buildParams.testSeed } tasks.register("workloadIdentityJavaRestTest", RestIntegTestTask) { @@ -74,10 +72,10 @@ tasks.register("workloadIdentityJavaRestTest", RestIntegTestTask) { systemProperty 'test.azure.tenant_id', azureTenantId ?: "583d4f71-148a-4163-bad5-2311e13c60dc" systemProperty 'test.azure.client_id', azureClientId ?: "86dd1b33-96c1-4a2e-92ac-b844404fc691" // omitting key and sas_token so that we use a bearer token from workload identity - nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_repository_test_kit_tests_" + BuildParams.testSeed + nonInputProperties.systemProperty 'test.azure.base_path', azureBasePath + "_repository_test_kit_tests_" + buildParams.testSeed } -if (BuildParams.inFipsJvm) { +if (buildParams.inFipsJvm) { // Cannot override the trust store in FIPS mode, and these tasks require a HTTPS fixture tasks.named("managedIdentityJavaRestTest").configure { enabled = false } tasks.named("workloadIdentityJavaRestTest").configure { enabled = false } diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java index a9b8fe51c01cc..03906b3cf69da 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.util.Map; +import java.util.concurrent.ThreadLocalRandom; import java.util.function.Predicate; import static org.hamcrest.Matchers.blankOrNullString; @@ -49,7 +50,10 @@ public class AzureRepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRes AZURE_TEST_CONTAINER, AZURE_TEST_TENANT_ID, AZURE_TEST_CLIENT_ID, - decideAuthHeaderPredicate() + decideAuthHeaderPredicate(), + // 5% of the time, in a contended lease scenario, expire the existing lease + (currentLeaseId, requestLeaseId) -> 
currentLeaseId.equals(requestLeaseId) == false + && ThreadLocalRandom.current().nextDouble() < 0.05 ); private static Predicate decideAuthHeaderPredicate() { @@ -78,12 +82,6 @@ private static Predicate decideAuthHeaderPredicate() { () -> "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=" + fixture.getAddress(), s -> USE_FIXTURE ) - .apply(c -> { - if (USE_FIXTURE) { - // test fixture does not support CAS yet; TODO fix this - c.systemProperty("test.repository_test_kit.skip_cas", "true"); - } - }) .systemProperty( "tests.azure.credentials.disable_instance_discovery", () -> "true", diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/gcs/build.gradle b/x-pack/plugin/snapshot-repo-test-kit/qa/gcs/build.gradle index 4f0a1c4faf0af..176a441279aab 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/gcs/build.gradle +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/gcs/build.gradle @@ -5,8 +5,6 @@ * 2.0. */ -import org.elasticsearch.gradle.internal.info.BuildParams - apply plugin: 'elasticsearch.internal-java-rest-test' dependencies { @@ -36,7 +34,7 @@ if (!gcsServiceAccount && !gcsBucket && !gcsBasePath) { tasks.named("javaRestTest").configure { systemProperty 'test.google.fixture', Boolean.toString(useFixture) systemProperty 'test.gcs.bucket', gcsBucket - nonInputProperties.systemProperty 'test.gcs.base_path', gcsBasePath + "_repository_test_kit_tests" + BuildParams.testSeed + nonInputProperties.systemProperty 'test.gcs.base_path', gcsBasePath + "_repository_test_kit_tests" + buildParams.testSeed if (useFixture == false) { systemProperty 'test.google.account', serviceAccountFile diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle index 3fbb55ca4eb3a..81eb82a522389 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle @@ -5,8 +5,6 @@ * 2.0. */ -import org.elasticsearch.gradle.internal.info.BuildParams - apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.rest-resources' @@ -34,7 +32,7 @@ dependencies { tasks.named("javaRestTest").configure { usesDefaultDistribution() description = "Runs rest tests against an elasticsearch cluster with HDFS." 
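For context on the lease-expiry lambda passed to AzureHttpFixture in the AzureRepositoryAnalysisRestIT change above: a minimal standalone model of that predicate, written here against plain java.util.function.BiPredicate (the fixture's actual MockAzureBlobStore.LeaseExpiryPredicate interface may differ in shape):

import java.util.concurrent.ThreadLocalRandom;
import java.util.function.BiPredicate;

public class LeaseExpirySketch {
    // Behaviour used by most suites above: a held lease never expires.
    static final BiPredicate<String, String> NEVER_EXPIRE = (currentLeaseId, requestLeaseId) -> false;

    // The analysis test instead expires a *contended* lease (the requester does
    // not already hold it) about 5% of the time, injecting realistic lease churn.
    static final BiPredicate<String, String> SOMETIMES_EXPIRE = (currentLeaseId, requestLeaseId) ->
        currentLeaseId.equals(requestLeaseId) == false
            && ThreadLocalRandom.current().nextDouble() < 0.05;
}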
- BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) // required for krb5kdc-fixture to work jvmArgs '--add-exports', 'java.security.jgss/sun.security.krb5=ALL-UNNAMED' } diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/minio/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/MinioRepositoryAnalysisRestIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/minio/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/MinioRepositoryAnalysisRestIT.java index b0068bd7bfdaf..3b5edaf768057 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/minio/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/MinioRepositoryAnalysisRestIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/minio/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/MinioRepositoryAnalysisRestIT.java @@ -20,7 +20,12 @@ @ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) public class MinioRepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRestTestCase { - public static final MinioTestContainer minioFixture = new MinioTestContainer(); + public static final MinioTestContainer minioFixture = new MinioTestContainer( + true, + "s3_test_access_key", + "s3_test_secret_key", + "bucket" + ); public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .distribution(DistributionType.DEFAULT) diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle index 21cf952f05bf1..33398d5b8064b 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle @@ -5,8 +5,6 @@ * 2.0. */ -import org.elasticsearch.gradle.internal.info.BuildParams - apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.rest-resources' @@ -45,7 +43,7 @@ tasks.named("javaRestTest").configure { systemProperty("s3AccessKey", s3AccessKey) systemProperty("s3SecretKey", s3SecretKey) nonInputProperties.systemProperty 'test.s3.base_path', - s3BasePath ? s3BasePath + "_repo_test_kit_tests" + BuildParams.testSeed : 'base_path_integration_tests' + s3BasePath ? s3BasePath + "_repo_test_kit_tests" + buildParams.testSeed : 'base_path_integration_tests' } tasks.register("s3ThirdPartyTest") { diff --git a/x-pack/plugin/spatial/build.gradle b/x-pack/plugin/spatial/build.gradle index 5bcec68c227ce..6299908f0dc14 100644 --- a/x-pack/plugin/spatial/build.gradle +++ b/x-pack/plugin/spatial/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ apply plugin: 'elasticsearch.internal-es-plugin' apply plugin: 'elasticsearch.internal-cluster-test' @@ -28,7 +33,7 @@ testClusters.configureEach { setting 'xpack.security.enabled', 'false' } -if (BuildParams.isSnapshotBuild() == false) { +if (buildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' } diff --git a/x-pack/plugin/sql/build.gradle b/x-pack/plugin/sql/build.gradle index 85d778f9ec87f..69468bf574956 100644 --- a/x-pack/plugin/sql/build.gradle +++ b/x-pack/plugin/sql/build.gradle @@ -1,8 +1,13 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + apply plugin: 'elasticsearch.internal-es-plugin' apply plugin: 'elasticsearch.internal-cluster-test' -import org.elasticsearch.gradle.internal.info.BuildParams - esplugin { name = 'x-pack-sql' description 'The Elasticsearch plugin that powers SQL for Elasticsearch' @@ -137,7 +142,7 @@ allprojects { } } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // Test clusters run with security disabled tasks.named("internalClusterTest").configure{enabled = false } } diff --git a/x-pack/plugin/sql/jdbc/build.gradle b/x-pack/plugin/sql/jdbc/build.gradle index 138f3e63af462..d1b179f09e403 100644 --- a/x-pack/plugin/sql/jdbc/build.gradle +++ b/x-pack/plugin/sql/jdbc/build.gradle @@ -1,6 +1,6 @@ apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.publish' -apply plugin: 'com.github.johnrengelman.shadow' +apply plugin: 'com.gradleup.shadow' description = 'JDBC driver for Elasticsearch' diff --git a/x-pack/plugin/sql/qa/jdbc/build.gradle b/x-pack/plugin/sql/qa/jdbc/build.gradle index 022306fe9b306..e93d3b72f1de9 100644 --- a/x-pack/plugin/sql/qa/jdbc/build.gradle +++ b/x-pack/plugin/sql/qa/jdbc/build.gradle @@ -1,7 +1,13 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + import org.elasticsearch.gradle.internal.BwcVersions.UnreleasedVersionInfo import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask description = 'Integration tests for SQL JDBC driver' @@ -72,11 +78,11 @@ subprojects { // Configure compatibility testing tasks // Compatibility testing for JDBC driver started with version 7.9.0 - BuildParams.bwcVersions.indexCompatible.findAll({ it.onOrAfter(Version.fromString("7.9.0")) && it != VersionProperties.elasticsearchVersion }).each { bwcVersion -> + buildParams.bwcVersions.indexCompatible.findAll({ it.onOrAfter(Version.fromString("7.9.0")) && it != VersionProperties.elasticsearchVersion }).each { bwcVersion -> def baseName = "v${bwcVersion}" def cluster = testClusters.register(baseName) - UnreleasedVersionInfo unreleasedVersion = BuildParams.bwcVersions.unreleasedInfo(bwcVersion) + UnreleasedVersionInfo unreleasedVersion = buildParams.bwcVersions.unreleasedInfo(bwcVersion) Configuration driverConfiguration = configurations.create("jdbcDriver${baseName}") { // TODO: Temporary workaround for https://github.com/elastic/elasticsearch/issues/73433 transitive = false diff --git a/x-pack/plugin/sql/qa/jdbc/security/build.gradle b/x-pack/plugin/sql/qa/jdbc/security/build.gradle index c446755e91929..82510285cb996 100644 --- a/x-pack/plugin/sql/qa/jdbc/security/build.gradle +++ b/x-pack/plugin/sql/qa/jdbc/security/build.gradle @@ -1,4 +1,8 @@ import org.elasticsearch.gradle.internal.test.RestIntegTestTask +import org.elasticsearch.gradle.testclusters.TestClusterValueSource +import org.elasticsearch.gradle.testclusters.TestClustersPlugin +import org.elasticsearch.gradle.testclusters.TestClustersRegistry +import org.elasticsearch.gradle.util.GradleUtils apply plugin: 
'elasticsearch.internal-test-artifact' @@ -11,7 +15,10 @@ dependencies { Project mainProject = project + subprojects { + def clusterPath = getPath() + // Use tests from the root security qa project in subprojects configurations.create('testArtifacts').transitive(false) @@ -46,6 +53,17 @@ subprojects { dependsOn copyTestClasses classpath += configurations.testArtifacts testClassesDirs = project.files(testArtifactsDir) + + Provider serviceProvider = GradleUtils.getBuildService( + project.gradle.sharedServices, + TestClustersPlugin.REGISTRY_SERVICE_NAME + ) + project.getProviders().of(TestClusterValueSource.class) { + it.parameters.path.set(clusterPath) + it.parameters.clusterName.set("javaRestTest") + it.parameters.service = serviceProvider + } + nonInputProperties.systemProperty 'tests.audit.logfile', "${-> testClusters.javaRestTest.singleNode().getAuditLog()}" nonInputProperties.systemProperty 'tests.audit.yesterday.logfile', diff --git a/x-pack/plugin/sql/qa/jdbc/security/with-ssl/build.gradle b/x-pack/plugin/sql/qa/jdbc/security/with-ssl/build.gradle index ec88fcffa941c..1637cad33c76d 100644 --- a/x-pack/plugin/sql/qa/jdbc/security/with-ssl/build.gradle +++ b/x-pack/plugin/sql/qa/jdbc/security/with-ssl/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ apply plugin: 'elasticsearch.test-with-ssl' @@ -11,5 +16,5 @@ testClusters.configureEach { // JDBC client can only be configured for SSL with keystores, but we can't use JKS/PKCS12 keystores in FIPS 140-2 mode. tasks.withType(Test).configureEach { - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) } diff --git a/x-pack/plugin/sql/qa/mixed-node/build.gradle b/x-pack/plugin/sql/qa/mixed-node/build.gradle index 412dec62f81f8..35600fda0eb33 100644 --- a/x-pack/plugin/sql/qa/mixed-node/build.gradle +++ b/x-pack/plugin/sql/qa/mixed-node/build.gradle @@ -1,10 +1,16 @@ -apply plugin: 'elasticsearch.legacy-java-rest-test' -apply plugin: 'elasticsearch.bwc-test' +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask +apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.bwc-test' + dependencies { javaRestTestImplementation project(':x-pack:qa') javaRestTestImplementation(project(xpackModule('ql:test-fixtures'))) @@ -19,7 +25,7 @@ testClusters.configureEach { tasks.named("javaRestTest").configure{ enabled = false} // A bug (https://github.com/elastic/elasticsearch/issues/68439) limits us to perform tests with versions from 7.10.3 onwards -BuildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.10.3") && +buildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.10.3") && v != VersionProperties.getElasticsearchVersion()) { bwcVersion, baseName -> def baseCluster = testClusters.register(baseName) { diff --git a/x-pack/plugin/sql/qa/server/security/with-ssl/build.gradle b/x-pack/plugin/sql/qa/server/security/with-ssl/build.gradle index 907d72e606bda..0b9c515c48be2 100644 --- a/x-pack/plugin/sql/qa/server/security/with-ssl/build.gradle +++ b/x-pack/plugin/sql/qa/server/security/with-ssl/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ apply plugin: 'elasticsearch.test-with-ssl' @@ -6,7 +11,7 @@ tasks.named("javaRestTest").configure { // Do not attempt to form a cluster in a FIPS JVM, as doing so with a JKS keystore will fail. // TODO Revisit this when SQL CLI client can handle key/certificate instead of only Keystores. // https://github.com/elastic/elasticsearch/issues/32306 - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) } testClusters.matching { it.name == "javaRestTest" }.configureEach { diff --git a/x-pack/plugin/sql/sql-cli/build.gradle b/x-pack/plugin/sql/sql-cli/build.gradle index b9713bcb8e7a3..bd8788191cfa2 100644 --- a/x-pack/plugin/sql/sql-cli/build.gradle +++ b/x-pack/plugin/sql/sql-cli/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ /* * This project is named sql-cli because it is in the "org.elasticsearch.plugin" @@ -8,7 +13,7 @@ import org.elasticsearch.gradle.internal.info.BuildParams */ apply plugin: 'elasticsearch.build' -apply plugin: 'com.github.johnrengelman.shadow' +apply plugin: 'com.gradleup.shadow' /* We don't use the 'application' plugin because it builds a zip and tgz which * we don't want. 
*/ @@ -55,7 +60,7 @@ tasks.register("runcli") { description = 'Run the CLI and connect to elasticsearch running on 9200' dependsOn "shadowJar" doLast { - List command = ["${BuildParams.runtimeJavaHome}/bin/java"] + List command = ["${buildParams.runtimeJavaHome.get()}/bin/java"] if ('true'.equals(providers.systemProperty('debug').orElse('false').get())) { command += '-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000' } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java index 411a4cda868f0..f9fed2b8f6a7d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java @@ -38,7 +38,6 @@ import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.tasks.TaskCancelledException; -import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.ql.execution.search.FieldExtraction; import org.elasticsearch.xpack.ql.execution.search.extractor.AbstractFieldHitExtractor; import org.elasticsearch.xpack.ql.execution.search.extractor.BucketExtractor; @@ -360,11 +359,6 @@ static class ImplicitGroupActionListener extends BaseAggActionListener { private static final List EMPTY_BUCKET = singletonList(new Bucket() { - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - throw new SqlIllegalArgumentException("No group-by/aggs defined"); - } - @Override public Object getKey() { throw new SqlIllegalArgumentException("No group-by/aggs defined"); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestBucket.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestBucket.java index cb832cbd4b2d4..8f8f5917ae123 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestBucket.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestBucket.java @@ -8,9 +8,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation.Bucket; -import org.elasticsearch.xcontent.XContentBuilder; -import java.io.IOException; import java.util.Map; class TestBucket implements Bucket { @@ -25,11 +23,6 @@ class TestBucket implements Bucket { this.aggs = aggs; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - throw new UnsupportedOperationException(); - } - @Override public Map getKey() { return key; diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/140_metadata.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/140_metadata.yml index 83234901ae8f2..35cfbac5e3439 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/140_metadata.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/140_metadata.yml @@ -170,4 +170,43 @@ setup: catch: /cannot sort on _source/ esql.query: body: - query: 'FROM test metadata _source | sort _source' + query: 'FROM test metadata _source | SORT _source' + +--- +"sort returning _source is allowed": + - requires: + test_runner_features: [capabilities] + 
capabilities: + - method: POST + path: /_query + parameters: [] + capabilities: [sort_returning_source_ok] + reason: "Sorts returning _source should be ok, but weren't in older versions" + - do: + esql.query: + body: + query: 'FROM test METADATA _source | SORT case ASC | KEEP case, _source | LIMIT 5' + - length: { columns: 2 } + - length: { values: 3 } + - match: {columns.0.name: "case"} + - match: {columns.0.type: "keyword"} + - match: {columns.1.name: "_source"} + - match: {columns.1.type: "_source"} + - match: {values.0.0: "all_ignored"} + - match: {values.0.1: { + "integer" : "not-an-integer", + "keyword" : "long-keyword", + "case" : "all_ignored" + }} + - match: {values.1.0: "integer_ignored"} + - match: {values.1.1: { + "integer" : "not-an-integer", + "keyword" : "ok", + "case" : "integer_ignored" + }} + - match: {values.2.0: "ok"} + - match: {values.2.1: { + "integer" : 10, + "keyword" : "ok", + "case" : "ok" + }} diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml index ea7684fb69a09..9fbe69ac05f0a 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/26_aggs_bucket.yml @@ -234,3 +234,58 @@ - match: { values.2.1: "2024-08-01T00:00:00.000Z" } - match: { values.3.0: 1 } - match: { values.3.1: "2024-09-01T00:00:00.000Z" } + +--- +"Datetime interval as string": + - requires: + test_runner_features: [allowed_warnings_regex, capabilities] + capabilities: + - method: POST + path: /_query + parameters: [ ] + capabilities: [ implicit_casting_string_literal_to_temporal_amount ] + reason: "interval in parameters as string" + + - do: + indices.create: + index: test_bucket + body: + mappings: + properties: + ts : + type : date + + - do: + bulk: + refresh: true + body: + - { "index": { "_index": "test_bucket" } } + - { "ts": "2024-06-16" } + - { "index": { "_index": "test_bucket" } } + - { "ts": "2024-07-16" } + - { "index": { "_index": "test_bucket" } } + - { "ts": "2024-08-16" } + - { "index": { "_index": "test_bucket" } } + - { "ts": "2024-09-16" } + + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM test_bucket | STATS c = COUNT(*) BY b = BUCKET(ts, ?bucket) | SORT b' + params: [{"bucket" : "1 month"}] + + - match: { columns.0.name: c } + - match: { columns.0.type: long } + - match: { columns.1.name: b } + - match: { columns.1.type: date } + - length: { values: 4 } + - match: { values.0.0: 1 } + - match: { values.0.1: "2024-06-01T00:00:00.000Z" } + - match: { values.1.0: 1 } + - match: { values.1.1: "2024-07-01T00:00:00.000Z" } + - match: { values.2.0: 1 } + - match: { values.2.1: "2024-08-01T00:00:00.000Z" } + - match: { values.3.0: 1 } + - match: { values.3.1: "2024-09-01T00:00:00.000Z" } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml index 4c3b16c5dc309..f7dd979540afa 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml @@ -92,7 +92,7 @@ setup: - gt: {esql.functions.to_long: $functions_to_long} - match: {esql.functions.coalesce: $functions_coalesce} # Testing for the entire function set isn't feasible, so we just check that we 
return the correct count as an approximation. - - length: {esql.functions: 120} # check the "sister" test below for a likely update to the same esql.functions length check + - length: {esql.functions: 122} # check the "sister" test below for a likely update to the same esql.functions length check --- "Basic ESQL usage output (telemetry) non-snapshot version": @@ -163,4 +163,4 @@ setup: - match: {esql.functions.cos: $functions_cos} - gt: {esql.functions.to_long: $functions_to_long} - match: {esql.functions.coalesce: $functions_coalesce} - - length: {esql.functions: 117} # check the "sister" test above for a likely update to the same esql.functions length check + - length: {esql.functions: 118} # check the "sister" test above for a likely update to the same esql.functions length check diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/61_enrich_ip.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/61_enrich_ip.yml index 076bf116292d0..3f2bcb4ed7f4d 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/61_enrich_ip.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/61_enrich_ip.yml @@ -162,14 +162,38 @@ teardown: --- "Invalid IP strings": - requires: - cluster_features: ["gte_v8.14.0"] - reason: "IP range ENRICH support was added in 8.14.0" + capabilities: + - method: POST + path: /_query + parameters: [method, path, parameters, capabilities] + capabilities: [enrich_strict_range_types] + reason: "Runtime range type checking was added" + test_runner_features: [capabilities, allowed_warnings_regex, warnings_regex] - do: - catch: /'invalid_[\d\.]+' is not an IP string literal/ + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + - "Line (1:68|-1:-1): evaluation of \\[(ENRICH networks-policy ON ip_str|)\\] failed, treating result as null. Only first 20 failures recorded." + - "Line (1:68|-1:-1): java.lang.IllegalArgumentException: 'invalid_' is not an IP string literal." 
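The allowed_warnings_regex entries above are matched as full-line Java regular expressions. A standalone illustration of how one of them matches a concrete warning line (the sample string below is a hypothetical example, not captured from a real run):

import java.util.regex.Pattern;

public class WarningRegexDemo {
    public static void main(String[] args) {
        // Simplified form of the last regex above; the (1:68|-1:-1) group covers
        // both a located and an unlocated warning position.
        Pattern p = Pattern.compile(
            "Line (1:68|-1:-1): java.lang.IllegalArgumentException: 'invalid_' is not an IP string literal."
        );
        String warning = "Line 1:68: java.lang.IllegalArgumentException: 'invalid_' is not an IP string literal.";
        System.out.println(p.matcher(warning).matches()); // true
    }
}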
+ esql.query: body: - query: 'FROM events | eval ip_str = concat("invalid_", to_string(ip)) | ENRICH networks-policy ON ip_str | sort @timestamp | KEEP ip, name, department, message' + query: 'FROM events | eval ip_str = mv_concat("invalid_", to_string(ip)) | ENRICH networks-policy ON ip_str | sort @timestamp | KEEP ip, name, department, message' + + - match: { columns.0.name: "ip" } + - match: { columns.0.type: "ip" } + - match: { columns.1.name: "name" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "department" } + - match: { columns.2.type: "keyword" } + - match: { columns.3.name: "message" } + - match: { columns.3.type: "keyword" } + + - length: { values: 4 } + - match: { values.0: [ "10.100.0.21", null, null, "network connected" ] } + - match: { values.1: [ [ "10.100.0.21", "10.101.0.107" ], null, null, "sending messages" ] } + - match: { values.2: [ "10.101.0.107" , null, null, "network disconnected" ] } + - match: { values.3: [ "13.101.0.114" , null, null, "authentication failed" ] } --- "IP": diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/63_enrich_int_range.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/63_enrich_int_range.yml new file mode 100644 index 0000000000000..4d84a10507504 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/63_enrich_int_range.yml @@ -0,0 +1,199 @@ +--- +setup: + - requires: + capabilities: + - method: POST + path: /_query + parameters: [method, path, parameters, capabilities] + capabilities: [enrich_strict_range_types] + reason: "Strict range type checking was added" + test_runner_features: [capabilities, allowed_warnings_regex, warnings_regex] + + - do: + indices.create: + index: ages + body: + settings: + index.number_of_shards: 1 + index.routing.rebalance.enable: "none" + mappings: + properties: + age_range: + type: "integer_range" + description: + type: "keyword" + + - do: + bulk: + index: ages + refresh: true + body: + - { "index": { } } + - { "age_range": { "gte": 0, "lt": 2 }, "description": "Baby" } + - { "index": { } } + - { "age_range": { "gte": 2, "lt": 4 }, "description": "Toddler" } + - { "index": { } } + - { "age_range": { "gte": 3, "lt": 5 }, "description": "Preschooler" } + - { "index": { } } + - { "age_range": { "gte": 5, "lt": 12 }, "description": "Child" } + - { "index": { } } + - { "age_range": { "gte": 13, "lt": 20 }, "description": "Adolescent" } + - { "index": { } } + - { "age_range": { "gte": 20, "lt": 40 }, "description": "Young Adult" } + - { "index": { } } + - { "age_range": { "gte": 40, "lt": 60 }, "description": "Middle-aged" } + - { "index": { } } + - { "age_range": { "gte": 60, "lt": 80 }, "description": "Senior" } + - { "index": { } } + - { "age_range": { "gte": 80, "lt": 100 }, "description": "Elderly" } + - { "index": { } } + - { "age_range": { "gte": 100, "lt": 200 }, "description": "Incredible" } + - do: + cluster.health: + wait_for_no_initializing_shards: true + wait_for_events: languid + + - do: + enrich.put_policy: + name: ages-policy + body: + range: + indices: [ "ages" ] + match_field: "age_range" + enrich_fields: [ "description" ] + + - do: + enrich.execute_policy: + name: ages-policy + + - do: + indices.create: + index: employees + body: + mappings: + properties: + name: + type: keyword + age: + type: integer + ak: + type: keyword + salary: + type: double + + - do: + bulk: + index: employees + refresh: true + body: + - { "index": { } } + - { "name": "Joe Soap", "age": 36, "ak": "36", "salary": 55.55 } 
+ - { "index": { } } + - { "name": "Jane Doe", "age": 31, "ak": "31", "salary": 55.55 } + - { "index": { } } + - { "name": "Jane Immortal", "age": -1, "ak": "immortal", "salary": 55.55 } + - { "index": { } } + - { "name": "Magic Mike", "age": 44, "ak": "44", "salary": 55.55 } + - { "index": { } } + - { "name": "Anon Ymous", "age": 61, "ak": "61", "salary": 55.55 } + +--- +teardown: + - do: + enrich.delete_policy: + name: ages-policy + +--- +"ages": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM employees | ENRICH ages-policy ON age | STATS count=COUNT(*) BY description | SORT count DESC, description ASC' + + - match: { columns.0.name: "count" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "description" } + - match: { columns.1.type: "keyword" } + + - length: { values: 4 } + - match: { values.0: [ 2, "Young Adult" ] } + - match: { values.1: [ 1, "Middle-aged" ] } + - match: { values.2: [ 1, "Senior" ] } + - match: { values.3: [ 1, null ] } + +--- +"ages as typecast keywords": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + - "Line 1:29: evaluation of \\[ak::integer\\] failed, treating result as null. Only first 20 failures recorded." + - "Line 1:29: org.elasticsearch.xpack.esql.core.InvalidArgumentException: Cannot parse number \\[immortal\\]" + esql.query: + body: + query: 'FROM employees | EVAL aki = ak::integer | ENRICH ages-policy ON aki | STATS count=COUNT(*) BY description | SORT count DESC, description ASC' + + - match: { columns.0.name: "count" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "description" } + - match: { columns.1.type: "keyword" } + + - length: { values: 4 } + - match: { values.0: [ 2, "Young Adult" ] } + - match: { values.1: [ 1, "Middle-aged" ] } + - match: { values.2: [ 1, "Senior" ] } + - match: { values.3: [ 1, null ] } + +--- +"ages as keywords": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + - "Line (1:18|-1:-1): evaluation of \\[(ENRICH ages-policy ON ak|)\\] failed, treating result as null. Only first 20 failures recorded." + - 'Line (1:18|-1:-1): java.lang.NumberFormatException: For input string: \\"immortal\\"' + esql.query: + body: + query: 'FROM employees | ENRICH ages-policy ON ak | STATS count=COUNT(*) BY description | SORT count DESC, description ASC' + + - match: { columns.0.name: "count" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "description" } + - match: { columns.1.type: "keyword" } + + - length: { values: 4 } + - match: { values.0: [ 2, "Young Adult" ] } + - match: { values.1: [ 1, "Middle-aged" ] } + - match: { values.2: [ 1, "Senior" ] } + - match: { values.3: [ 1, null ] } + +--- +"Invalid age as keyword": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + - "Line (1:18|-1:-1): evaluation of \\[(ENRICH ages-policy ON name|)\\] failed, treating result as null. Only first 20 failures recorded." 
+ - 'Line (1:18|-1:-1): java.lang.NumberFormatException: For input string: \\"Joe Soap\\"' + - 'Line (1:18|-1:-1): java.lang.NumberFormatException: For input string: \\"Jane Doe\\"' + - 'Line (1:18|-1:-1): java.lang.NumberFormatException: For input string: \\"Jane Immortal\\"' + - 'Line (1:18|-1:-1): java.lang.NumberFormatException: For input string: \\"Magic Mike\\"' + - 'Line (1:18|-1:-1): java.lang.NumberFormatException: For input string: \\"Anon Ymous\\"' + esql.query: + body: + query: 'FROM employees | ENRICH ages-policy ON name | STATS count=COUNT(*) BY description | SORT count DESC, description ASC' + + - match: { columns.0.name: "count" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "description" } + - match: { columns.1.type: "keyword" } + + - length: { values: 1 } + - match: { values.0: [ 5, null ] } + +--- +"Invalid age as double": + - do: + catch: /ENRICH range and input types are incompatible. range\[INTEGER\], input\[DOUBLE\]/ + esql.query: + body: + query: 'FROM employees | ENRICH ages-policy ON salary | STATS count=COUNT(*) BY description | SORT count DESC, description ASC' diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/64_enrich_int_match.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/64_enrich_int_match.yml new file mode 100644 index 0000000000000..ef11e5098f5c2 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/64_enrich_int_match.yml @@ -0,0 +1,222 @@ +--- +setup: + - requires: + capabilities: + - method: POST + path: /_query + parameters: [method, path, parameters, capabilities] + capabilities: [enrich_strict_range_types] + reason: "Strict range type checking was added" + test_runner_features: [capabilities, allowed_warnings_regex, warnings_regex] + + - do: + indices.create: + index: decades + body: + settings: + index.number_of_shards: 1 + index.routing.rebalance.enable: "none" + mappings: + properties: + decade: + type: "integer" + description: + type: "keyword" + + - do: + bulk: + index: decades + refresh: true + body: + - { "index": { } } + - { "decade": 1900, "description": "Gay Nineties" } + - { "index": { } } + - { "decade": 1910, "description": "Teens" } + - { "index": { } } + - { "decade": 1920, "description": "Roaring Twenties" } + - { "index": { } } + - { "decade": 1930, "description": "Dirty Thirties" } + - { "index": { } } + - { "decade": 1940, "description": "War Years" } + - { "index": { } } + - { "decade": 1950, "description": "Fabulous Fifties" } + - { "index": { } } + - { "decade": 1960, "description": "Swinging Sixties" } + - { "index": { } } + - { "decade": 1970, "description": "Me Decade" } + - { "index": { } } + - { "decade": 1980, "description": "Decade of Greed" } + - { "index": { } } + - { "decade": 1990, "description": "Nineties" } + - { "index": { } } + - { "decade": 2000, "description": "Aughts" } + - { "index": { } } + - { "decade": 2010, "description": "Digital Age" } + - { "index": { } } + - { "decade": 2020, "description": "Roaring Twenties 2.0" } + - do: + cluster.health: + wait_for_no_initializing_shards: true + wait_for_events: languid + + - do: + enrich.put_policy: + name: decades-policy + body: + match: + indices: [ "decades" ] + match_field: "decade" + enrich_fields: [ "description" ] + + - do: + enrich.execute_policy: + name: decades-policy + + - do: + indices.create: + index: songs + body: + mappings: + properties: + title: + type: keyword + year: + type: integer + singer: + type: keyword + + - do: + bulk: + index: songs 
+ refresh: true + body: + - { "index": { } } + - { "singer": "Louis Armstrong", "title": "What a Wonderful World", "year": 1967 } + - { "index": { } } + - { "singer": "The Beatles", "title": "Hey Jude", "year": 1968 } + - { "index": { } } + - { "singer": "Elvis Presley", "title": "Jailhouse Rock", "year": 1957 } + - { "index": { } } + - { "singer": "Billie Holiday", "title": "Strange Fruit", "year": 1939 } + - { "index": { } } + - { "singer": "Frank Sinatra", "title": "Fly Me to the Moon", "year": 1964 } + - { "index": { } } + - { "singer": "Bob Dylan", "title": "Blowin' in the Wind", "year": 1963 } + - { "index": { } } + - { "singer": "Queen", "title": "Bohemian Rhapsody", "year": 1975 } + - { "index": { } } + - { "singer": "ABBA", "title": "Dancing Queen", "year": 1976 } + - { "index": { } } + - { "singer": "Michael Jackson", "title": "Thriller", "year": 1982 } + - { "index": { } } + - { "singer": "Nirvana", "title": "Smells Like Teen Spirit", "year": 1991 } + - { "index": { } } + - { "singer": "Whitney Houston", "title": "I Will Always Love You", "year": 1992 } + - { "index": { } } + - { "singer": "Aretha Franklin", "title": "Respect", "year": 1967 } + - { "index": { } } + - { "singer": "Chuck Berry", "title": "Johnny B. Goode", "year": 1958 } + - { "index": { } } + - { "singer": "Madonna", "title": "Like a Prayer", "year": 1989 } + - { "index": { } } + - { "singer": "The Rolling Stones", "title": "(I Can't Get No) Satisfaction", "year": 1965 } + - { "index": { } } + - { "singer": "Beyoncé", "title": "Single Ladies (Put a Ring on It)", "year": 2008 } + - { "index": { } } + - { "singer": "Adele", "title": "Rolling in the Deep", "year": 2010 } + - { "index": { } } + - { "singer": "Lady Gaga", "title": "Bad Romance", "year": 2009 } + - { "index": { } } + - { "singer": "Billie Eilish", "title": "Bad Guy", "year": 2019 } + - { "index": { } } + - { "singer": "Taylor Swift", "title": "Anti-Hero", "year": 2022 } + +--- +teardown: + - do: + enrich.delete_policy: + name: decades-policy + +--- +"decades": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM songs | EVAL decade = (10*FLOOR(year/10))::integer | ENRICH decades-policy ON decade | STATS count=COUNT(*) BY description | SORT count DESC, description ASC' + + - match: { columns.0.name: "count" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "description" } + - match: { columns.1.type: "keyword" } + + - length: { values: 9 } + - match: { values.0: [ 6, "Swinging Sixties" ] } + - match: { values.1: [ 2, "Aughts" ] } + - match: { values.2: [ 2, "Decade of Greed" ] } + - match: { values.3: [ 2, "Digital Age" ] } + - match: { values.4: [ 2, "Fabulous Fifties" ] } + - match: { values.5: [ 2, "Me Decade" ] } + - match: { values.6: [ 2, "Nineties" ] } + - match: { values.7: [ 1, "Dirty Thirties" ] } + - match: { values.8: [ 1, "Roaring Twenties 2.0" ] } + +--- +"decades as typecast keywords": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM songs | EVAL decade = (10*FLOOR(year/10))::keyword | ENRICH decades-policy ON decade | STATS count=COUNT(*) BY description | SORT count DESC, description ASC' + + - match: { columns.0.name: "count" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "description" } + - match: { columns.1.type: "keyword" } + + - length: { values: 9 } + - match: { values.0: [ 6, "Swinging Sixties" ] } + - match: { values.1: [ 2, "Aughts" ] } 
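A worked check of the EVAL used in the decades tests above and below: the enrich key is the year rounded down to its decade, so 1967 buckets to 1960 ("Swinging Sixties" in the policy data). Illustrative only:

public class DecadeKey {
    public static void main(String[] args) {
        int year = 1967; // "What a Wonderful World"
        int decade = (int) (10 * Math.floor(year / 10.0)); // mirrors (10*FLOOR(year/10))::integer
        System.out.println(decade); // 1960
    }
}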
+ - match: { values.2: [ 2, "Decade of Greed" ] } + - match: { values.3: [ 2, "Digital Age" ] } + - match: { values.4: [ 2, "Fabulous Fifties" ] } + - match: { values.5: [ 2, "Me Decade" ] } + - match: { values.6: [ 2, "Nineties" ] } + - match: { values.7: [ 1, "Dirty Thirties" ] } + - match: { values.8: [ 1, "Roaring Twenties 2.0" ] } + +--- +"Invalid decade as keyword": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM songs | ENRICH decades-policy ON singer | STATS count=COUNT(*) BY description | SORT count DESC, description ASC' + + - match: { columns.0.name: "count" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "description" } + - match: { columns.1.type: "keyword" } + + - length: { values: 1 } + - match: { values.0: [ 20, null ] } + +--- +"Invalid decade as double": + - do: + allowed_warnings_regex: + - "No limit defined, adding default limit of \\[.*\\]" + esql.query: + body: + query: 'FROM songs | EVAL decade = 10.0*FLOOR(year/10) | ENRICH decades-policy ON decade | STATS count=COUNT(*) BY description | SORT count DESC, description ASC' + + - match: { columns.0.name: "count" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "description" } + - match: { columns.1.type: "keyword" } + + - length: { values: 1 } + - match: { values.0: [ 20, null ] } diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java index b2dc04c1178e4..c89a8237d40b7 100644 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistry.java @@ -10,12 +10,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -35,7 +33,6 @@ import java.util.Map; import static org.elasticsearch.xpack.stack.StackTemplateRegistry.STACK_TEMPLATES_ENABLED; -import static org.elasticsearch.xpack.stack.StackTemplateRegistry.STACK_TEMPLATES_FEATURE; @Deprecated(since = "8.12.0", forRemoval = true) public class LegacyStackTemplateRegistry extends IndexTemplateRegistry { @@ -48,7 +45,6 @@ public class LegacyStackTemplateRegistry extends IndexTemplateRegistry { public static final String TEMPLATE_VERSION_VARIABLE = "xpack.stack.template.version"; private final ClusterService clusterService; - private final FeatureService featureService; private volatile boolean stackTemplateEnabled; private static final Map ADDITIONAL_TEMPLATE_VARIABLES = Map.of("xpack.stack.template.deprecated", "true"); @@ -95,12 +91,10 @@ public LegacyStackTemplateRegistry( ClusterService clusterService, ThreadPool threadPool, Client client, - NamedXContentRegistry xContentRegistry, - FeatureService featureService + NamedXContentRegistry xContentRegistry ) { super(nodeSettings, 
clusterService, threadPool, client, xContentRegistry); this.clusterService = clusterService; - this.featureService = featureService; this.stackTemplateEnabled = STACK_TEMPLATES_ENABLED.get(nodeSettings); } @@ -282,12 +276,4 @@ protected boolean requiresMasterNode() { // there and the ActionNotFoundTransportException errors are then prevented. return true; } - - @Override - protected boolean isClusterReady(ClusterChangedEvent event) { - // Ensure current version of the components are installed only once all nodes are updated to 8.9.0. - // This is necessary to prevent an error caused by the usage of the ignore_missing_pipeline property - // in the pipeline processor, which has been introduced only in 8.9.0 - return featureService.clusterHasFeature(event.state(), STACK_TEMPLATES_FEATURE); - } } diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackPlugin.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackPlugin.java index 71d01798323d3..73c18a3cc2619 100644 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackPlugin.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackPlugin.java @@ -33,8 +33,7 @@ public Collection createComponents(PluginServices services) { services.clusterService(), services.threadPool(), services.client(), - services.xContentRegistry(), - services.featureService() + services.xContentRegistry() ); legacyStackTemplateRegistry.initialize(); StackTemplateRegistry stackTemplateRegistry = new StackTemplateRegistry( @@ -42,8 +41,7 @@ public Collection createComponents(PluginServices services) { services.clusterService(), services.threadPool(), services.client(), - services.xContentRegistry(), - services.featureService() + services.xContentRegistry() ); stackTemplateRegistry.initialize(); return List.of(legacyStackTemplateRegistry, stackTemplateRegistry); diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java index ce1b664a46887..aeb9bf2bfa5cb 100644 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplateRegistry.java @@ -10,14 +10,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -38,13 +35,6 @@ public class StackTemplateRegistry extends IndexTemplateRegistry { private static final Logger logger = LogManager.getLogger(StackTemplateRegistry.class); - // Historical node feature kept here as LegacyStackTemplateRegistry is deprecated - public static final NodeFeature STACK_TEMPLATES_FEATURE = new NodeFeature("stack.templates_supported"); - - // this node feature is a redefinition of {@link DataStreamFeatures#DATA_STREAM_LIFECYCLE} 
and it's meant to avoid adding a - // dependency to the data-streams module just for this - public static final NodeFeature DATA_STREAM_LIFECYCLE = new NodeFeature("data_stream.lifecycle"); - // The stack template registry version. This number must be incremented when we make changes // to built-in templates. public static final int REGISTRY_VERSION = 14; @@ -58,7 +48,6 @@ public class StackTemplateRegistry extends IndexTemplateRegistry { ); private final ClusterService clusterService; - private final FeatureService featureService; private final Map componentTemplateConfigs; private volatile boolean stackTemplateEnabled; @@ -121,12 +110,10 @@ public StackTemplateRegistry( ClusterService clusterService, ThreadPool threadPool, Client client, - NamedXContentRegistry xContentRegistry, - FeatureService featureService + NamedXContentRegistry xContentRegistry ) { super(nodeSettings, clusterService, threadPool, client, xContentRegistry); this.clusterService = clusterService; - this.featureService = featureService; this.stackTemplateEnabled = STACK_TEMPLATES_ENABLED.get(nodeSettings); this.componentTemplateConfigs = loadComponentTemplateConfigs(); } @@ -355,11 +342,4 @@ protected boolean requiresMasterNode() { // there and the ActionNotFoundTransportException errors are then prevented. return true; } - - @Override - protected boolean isClusterReady(ClusterChangedEvent event) { - // Ensure current version of the components are installed only after versions that support data stream lifecycle - // due to .kibana-reporting making use of the feature - return featureService.clusterHasFeature(event.state(), DATA_STREAM_LIFECYCLE); - } } diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplatesFeatures.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplatesFeatures.java deleted file mode 100644 index 7b05231fcfd15..0000000000000 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackTemplatesFeatures.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.stack; - -import org.elasticsearch.Version; -import org.elasticsearch.features.FeatureSpecification; -import org.elasticsearch.features.NodeFeature; - -import java.util.Map; - -public class StackTemplatesFeatures implements FeatureSpecification { - @Override - public Map getHistoricalFeatures() { - return Map.of(StackTemplateRegistry.STACK_TEMPLATES_FEATURE, Version.V_8_9_0); - } -} diff --git a/x-pack/plugin/stack/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/x-pack/plugin/stack/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification deleted file mode 100644 index 30a1498a54725..0000000000000 --- a/x-pack/plugin/stack/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification +++ /dev/null @@ -1,8 +0,0 @@ -# -# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -# or more contributor license agreements. Licensed under the Elastic License -# 2.0; you may not use this file except in compliance with the Elastic License -# 2.0. 
-# - -org.elasticsearch.xpack.stack.StackTemplatesFeatures diff --git a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistryTests.java b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistryTests.java index 39f58e638aa68..654cf494e0e6f 100644 --- a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistryTests.java +++ b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/LegacyStackTemplateRegistryTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.ingest.PipelineConfiguration; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; @@ -25,8 +24,6 @@ import org.junit.After; import org.junit.Before; -import java.util.List; - public class LegacyStackTemplateRegistryTests extends ESTestCase { private LegacyStackTemplateRegistry registry; private ThreadPool threadPool; @@ -36,15 +33,7 @@ public void createRegistryAndClient() { threadPool = new TestThreadPool(this.getClass().getName()); Client client = new NoOpClient(threadPool); ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); - var featureService = new FeatureService(List.of(new StackTemplatesFeatures())); - registry = new LegacyStackTemplateRegistry( - Settings.EMPTY, - clusterService, - threadPool, - client, - NamedXContentRegistry.EMPTY, - featureService - ); + registry = new LegacyStackTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client, NamedXContentRegistry.EMPTY); } @After @@ -67,7 +56,7 @@ public void testThatTemplatesAreDeprecated() { registry.getIngestPipelines() .stream() .map(ipc -> new PipelineConfiguration(ipc.getId(), ipc.loadConfig(), XContentType.JSON)) - .map(PipelineConfiguration::getConfigAsMap) + .map(PipelineConfiguration::getConfig) .forEach(p -> assertTrue((Boolean) p.get("deprecated"))); } diff --git a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackRegistryWithNonRequiredTemplates.java b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackRegistryWithNonRequiredTemplates.java index c1c855867599a..7f674e24658dd 100644 --- a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackRegistryWithNonRequiredTemplates.java +++ b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackRegistryWithNonRequiredTemplates.java @@ -11,7 +11,6 @@ import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.template.IndexTemplateConfig; @@ -24,10 +23,9 @@ class StackRegistryWithNonRequiredTemplates extends StackTemplateRegistry { ClusterService clusterService, ThreadPool threadPool, Client client, - NamedXContentRegistry xContentRegistry, - FeatureService featureService + NamedXContentRegistry xContentRegistry ) { - super(nodeSettings, clusterService, threadPool, client, xContentRegistry, featureService); + super(nodeSettings, clusterService, threadPool, client, xContentRegistry); } @Override diff --git 
a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java index 25ff3b5311fa2..a8043f3d5e4e5 100644 --- a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java +++ b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.stack; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -29,8 +28,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.datastreams.DataStreamFeatures; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.ingest.PipelineConfiguration; import org.elasticsearch.test.ClusterServiceUtils; @@ -71,7 +68,6 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; @@ -82,22 +78,13 @@ public class StackTemplateRegistryTests extends ESTestCase { private ClusterService clusterService; private ThreadPool threadPool; private VerifyingClient client; - private FeatureService featureService; @Before public void createRegistryAndClient() { threadPool = new TestThreadPool(this.getClass().getName()); client = new VerifyingClient(threadPool); clusterService = ClusterServiceUtils.createClusterService(threadPool); - featureService = new FeatureService(List.of(new StackTemplatesFeatures(), new DataStreamFeatures())); - registry = new StackTemplateRegistry( - Settings.EMPTY, - clusterService, - threadPool, - client, - NamedXContentRegistry.EMPTY, - featureService - ); + registry = new StackTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client, NamedXContentRegistry.EMPTY); } @After @@ -114,8 +101,7 @@ public void testDisabledDoesNotAddIndexTemplates() { clusterService, threadPool, client, - NamedXContentRegistry.EMPTY, - featureService + NamedXContentRegistry.EMPTY ); assertThat(disabledRegistry.getComposableTemplateConfigs(), anEmptyMap()); } @@ -127,8 +113,7 @@ public void testDisabledStillAddsComponentTemplatesAndIlmPolicies() { clusterService, threadPool, client, - NamedXContentRegistry.EMPTY, - featureService + NamedXContentRegistry.EMPTY ); assertThat(disabledRegistry.getComponentTemplateConfigs(), not(anEmptyMap())); assertThat( @@ -371,8 +356,7 @@ public void testMissingNonRequiredTemplates() throws Exception { clusterService, threadPool, client, - NamedXContentRegistry.EMPTY, - featureService + NamedXContentRegistry.EMPTY ); DiscoveryNode node = DiscoveryNodeUtils.create("node"); @@ -519,25 +503,6 @@ public void testThatMissingMasterNodeDoesNothing() { registry.clusterChanged(event); } - public void testThatNothingIsInstalledWhenAllNodesAreNotUpdated() { - DiscoveryNode updatedNode = DiscoveryNodeUtils.create("updatedNode"); - DiscoveryNode outdatedNode = DiscoveryNodeUtils.create("outdatedNode", ESTestCase.buildNewFakeTransportAddress(), Version.V_8_10_0); - DiscoveryNodes nodes = DiscoveryNodes.builder() - .localNodeId("updatedNode") - 
.masterNodeId("updatedNode") - .add(updatedNode) - .add(outdatedNode) - .build(); - - client.setVerifier((a, r, l) -> { - fail("if some cluster mode are not updated to at least v.8.11.0 nothing should happen"); - return null; - }); - - ClusterChangedEvent event = createClusterChangedEvent(Collections.emptyMap(), nodes); - registry.clusterChanged(event); - } - public void testThatTemplatesAreNotDeprecated() { for (ComposableIndexTemplate it : registry.getComposableTemplateConfigs().values()) { assertFalse(it.isDeprecated()); @@ -551,15 +516,10 @@ public void testThatTemplatesAreNotDeprecated() { registry.getIngestPipelines() .stream() .map(ipc -> new PipelineConfiguration(ipc.getId(), ipc.loadConfig(), XContentType.JSON)) - .map(PipelineConfiguration::getConfigAsMap) + .map(PipelineConfiguration::getConfig) .forEach(p -> assertFalse((Boolean) p.get("deprecated"))); } - public void testDataStreamLifecycleNodeFeatureId() { - // let's make sure these ids remain in-sync - assertThat(StackTemplateRegistry.DATA_STREAM_LIFECYCLE.id(), is(DataStreamFeatures.DATA_STREAM_LIFECYCLE.id())); - } - // ------------- /** diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureAction.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureAction.java index 5078572dee5fd..f47a25409b821 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureAction.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/rest/RestFindStructureAction.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.textstructure.rest; -import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.rest.BaseRestHandler; @@ -50,14 +50,14 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { FindStructureAction.Request request = new FindStructureAction.Request(); RestFindStructureArgumentsParser.parse(restRequest, request); + var content = restRequest.requiredContent(); + request.setSample(content); - if (restRequest.hasContent()) { - request.setSample(restRequest.content()); - } else { - throw new ElasticsearchParseException("request body is required"); - } - - return channel -> client.execute(FindStructureAction.INSTANCE, request, new RestToXContentListener<>(channel)); + return channel -> client.execute( + FindStructureAction.INSTANCE, + request, + ActionListener.withRef(new RestToXContentListener<>(channel), content) + ); } @Override diff --git a/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle b/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle index b429e123bb631..b4ee0bee76d9d 100644 --- a/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle +++ b/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle @@ -1,8 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + import org.elasticsearch.gradle.internal.test.RestIntegTestTask import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE -import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.internal-testclusters' apply plugin: 'elasticsearch.standalone-rest-test' @@ -12,7 +18,7 @@ dependencies { testImplementation project(':x-pack:qa') } -Version ccsCompatVersion = BuildParams.bwcVersions.minimumWireCompatibleVersion +Version ccsCompatVersion = buildParams.bwcVersions.minimumWireCompatibleVersion restResources { restApi { diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java index 36237d2705205..60f00da195974 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -282,7 +283,10 @@ private void getPreview( builder.startObject(); builder.field("docs", results); builder.endObject(); - var pipelineRequest = new SimulatePipelineRequest(BytesReference.bytes(builder), XContentType.JSON); + var pipelineRequest = new SimulatePipelineRequest( + ReleasableBytesReference.wrap(BytesReference.bytes(builder)), + XContentType.JSON + ); pipelineRequest.setId(pipeline); parentTaskClient.execute(SimulatePipelineAction.INSTANCE, pipelineRequest, pipelineResponseActionListener); } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtilsTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtilsTests.java index 681ec38e9a57a..7359071996cc8 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtilsTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtilsTests.java @@ -918,14 +918,14 @@ public void testRangeAggExtractor() { Aggregation agg = createRangeAgg( "p_agg", List.of( - new InternalRange.Bucket(null, Double.NEGATIVE_INFINITY, 10.5, 10, InternalAggregations.EMPTY, false, DocValueFormat.RAW), - new InternalRange.Bucket(null, 10.5, 19.5, 30, InternalAggregations.EMPTY, false, DocValueFormat.RAW), - new InternalRange.Bucket(null, 19.5, 200, 30, InternalAggregations.EMPTY, false, DocValueFormat.RAW), - new InternalRange.Bucket(null, 20, Double.POSITIVE_INFINITY, 0, InternalAggregations.EMPTY, false, DocValueFormat.RAW), - new InternalRange.Bucket(null, -10, -5, 0, InternalAggregations.EMPTY, false, DocValueFormat.RAW), - new InternalRange.Bucket(null, -11.0, -6.0, 0, InternalAggregations.EMPTY, false, DocValueFormat.RAW), - new InternalRange.Bucket(null, -11.0, 0, 0, InternalAggregations.EMPTY, false, DocValueFormat.RAW), - 
new InternalRange.Bucket("custom-0", 0, 10, 777, InternalAggregations.EMPTY, false, DocValueFormat.RAW) + new InternalRange.Bucket(null, Double.NEGATIVE_INFINITY, 10.5, 10, InternalAggregations.EMPTY, DocValueFormat.RAW), + new InternalRange.Bucket(null, 10.5, 19.5, 30, InternalAggregations.EMPTY, DocValueFormat.RAW), + new InternalRange.Bucket(null, 19.5, 200, 30, InternalAggregations.EMPTY, DocValueFormat.RAW), + new InternalRange.Bucket(null, 20, Double.POSITIVE_INFINITY, 0, InternalAggregations.EMPTY, DocValueFormat.RAW), + new InternalRange.Bucket(null, -10, -5, 0, InternalAggregations.EMPTY, DocValueFormat.RAW), + new InternalRange.Bucket(null, -11.0, -6.0, 0, InternalAggregations.EMPTY, DocValueFormat.RAW), + new InternalRange.Bucket(null, -11.0, 0, 0, InternalAggregations.EMPTY, DocValueFormat.RAW), + new InternalRange.Bucket("custom-0", 0, 10, 777, InternalAggregations.EMPTY, DocValueFormat.RAW) ) ); assertThat( diff --git a/x-pack/plugin/watcher/qa/rest/build.gradle b/x-pack/plugin/watcher/qa/rest/build.gradle index a911c022212b2..2d5fc8349b5e0 100644 --- a/x-pack/plugin/watcher/qa/rest/build.gradle +++ b/x-pack/plugin/watcher/qa/rest/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ apply plugin: 'elasticsearch.legacy-java-rest-test' apply plugin: 'elasticsearch.legacy-yaml-rest-test' @@ -29,7 +34,7 @@ testClusters.configureEach { setting 'logger.org.elasticsearch.xpack.watcher', 'DEBUG' } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // Test clusters run with security disabled tasks.named("javaRestTest").configure{enabled = false } tasks.named("yamlRestTest").configure{enabled = false } diff --git a/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/put_watch/11_timezoned_schedules.yml b/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/put_watch/11_timezoned_schedules.yml new file mode 100644 index 0000000000000..0371443367603 --- /dev/null +++ b/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/put_watch/11_timezoned_schedules.yml @@ -0,0 +1,121 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +"Test put watch api with timezone": + - do: + watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "timezone": "America/Los_Angeles", + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "test" + } + } + } + } + - match: { _id: "my_watch" } + - do: + watcher.get_watch: + id: "my_watch" + - match: { watch.trigger.schedule.timezone: "America/Los_Angeles" } + +--- +"Test put watch api without timezone": + - do: + watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "test" + } + } + } + } + - match: { _id: "my_watch" } + - do: + watcher.get_watch: + id: "my_watch" + - is_false: watch.trigger.schedule.timezone + +--- +"Reject put watch with 
invalid timezone": + - do: + watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "timezone": "Pangea/Tethys", + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "test" + } + } + } + } + catch: bad_request + - match: { error.type: "parse_exception" } + - match: { error.reason: "could not parse schedule. invalid timezone [Pangea/Tethys]" } + - match: { error.caused_by.type: "zone_rules_exception" } + - match: { error.caused_by.reason: "Unknown time-zone ID: Pangea/Tethys" } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java index d11cb7521976a..a979d614fe38f 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java @@ -26,7 +26,9 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; +import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.function.Predicate; @@ -56,9 +58,72 @@ public class EmailService extends NotificationService { (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope) ); + private static final List ALLOW_ALL_DEFAULT = List.of("*"); + private static final Setting> SETTING_DOMAIN_ALLOWLIST = Setting.stringListSetting( "xpack.notification.email.account.domain_allowlist", - List.of("*"), + ALLOW_ALL_DEFAULT, + new Setting.Validator<>() { + @Override + public void validate(List value) { + // Ignored + } + + @Override + @SuppressWarnings("unchecked") + public void validate(List value, Map, Object> settings) { + List recipientAllowPatterns = (List) settings.get(SETTING_RECIPIENT_ALLOW_PATTERNS); + if (value.equals(ALLOW_ALL_DEFAULT) == false && recipientAllowPatterns.equals(ALLOW_ALL_DEFAULT) == false) { + throw new IllegalArgumentException( + "Cannot set both [" + + SETTING_RECIPIENT_ALLOW_PATTERNS.getKey() + + "] and [" + + SETTING_DOMAIN_ALLOWLIST.getKey() + + "] to a non [\"*\"] value at the same time." + ); + } + } + + @Override + public Iterator> settings() { + List> settingRecipientAllowPatterns = List.of(SETTING_RECIPIENT_ALLOW_PATTERNS); + return settingRecipientAllowPatterns.iterator(); + } + }, + Property.Dynamic, + Property.NodeScope + ); + + private static final Setting> SETTING_RECIPIENT_ALLOW_PATTERNS = Setting.stringListSetting( + "xpack.notification.email.recipient_allowlist", + ALLOW_ALL_DEFAULT, + new Setting.Validator<>() { + @Override + public void validate(List value) { + // Ignored + } + + @Override + @SuppressWarnings("unchecked") + public void validate(List value, Map, Object> settings) { + List domainAllowList = (List) settings.get(SETTING_DOMAIN_ALLOWLIST); + if (value.equals(ALLOW_ALL_DEFAULT) == false && domainAllowList.equals(ALLOW_ALL_DEFAULT) == false) { + throw new IllegalArgumentException( + "Connect set both [" + + SETTING_RECIPIENT_ALLOW_PATTERNS.getKey() + + "] and [" + + SETTING_DOMAIN_ALLOWLIST.getKey() + + "] to a non [\"*\"] value at the same time." 
+ ); + } + } + + @Override + public Iterator> settings() { + List> settingDomainAllowlist = List.of(SETTING_DOMAIN_ALLOWLIST); + return settingDomainAllowlist.iterator(); + } + }, Property.Dynamic, Property.NodeScope ); @@ -167,6 +232,7 @@ public class EmailService extends NotificationService { private final CryptoService cryptoService; private final SSLService sslService; private volatile Set allowedDomains; + private volatile Set allowedRecipientPatterns; @SuppressWarnings("this-escape") public EmailService(Settings settings, @Nullable CryptoService cryptoService, SSLService sslService, ClusterSettings clusterSettings) { @@ -192,7 +258,9 @@ public EmailService(Settings settings, @Nullable CryptoService cryptoService, SS clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_SEND_PARTIAL, (s, o) -> {}, (s, o) -> {}); clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_WAIT_ON_QUIT, (s, o) -> {}, (s, o) -> {}); this.allowedDomains = new HashSet<>(SETTING_DOMAIN_ALLOWLIST.get(settings)); + this.allowedRecipientPatterns = new HashSet<>(SETTING_RECIPIENT_ALLOW_PATTERNS.get(settings)); clusterSettings.addSettingsUpdateConsumer(SETTING_DOMAIN_ALLOWLIST, this::updateAllowedDomains); + clusterSettings.addSettingsUpdateConsumer(SETTING_RECIPIENT_ALLOW_PATTERNS, this::updateAllowedRecipientPatterns); // do an initial load reload(settings); } @@ -201,6 +269,10 @@ void updateAllowedDomains(List newDomains) { this.allowedDomains = new HashSet<>(newDomains); } + void updateAllowedRecipientPatterns(List newPatterns) { + this.allowedRecipientPatterns = new HashSet<>(newPatterns); + } + @Override protected Account createAccount(String name, Settings accountSettings) { Account.Config config = new Account.Config(name, accountSettings, getSmtpSslSocketFactory(), logger); @@ -228,33 +300,47 @@ public EmailSent send(Email email, Authentication auth, Profile profile, String "failed to send email with subject [" + email.subject() + "] and recipient domains " - + getRecipientDomains(email) + + getRecipients(email, true) + ", one or more recipients is not specified in the domain allow list setting [" + SETTING_DOMAIN_ALLOWLIST.getKey() + "]." ); } + if (recipientAddressInAllowList(email, this.allowedRecipientPatterns) == false) { + throw new IllegalArgumentException( + "failed to send email with subject [" + + email.subject() + + "] and recipients " + + getRecipients(email, false) + + ", one or more recipients is not specified in the domain allow list setting [" + + SETTING_RECIPIENT_ALLOW_PATTERNS.getKey() + + "]." 
+ ); + } return send(email, auth, profile, account); } // Visible for testing - static Set getRecipientDomains(Email email) { - return Stream.concat( + static Set getRecipients(Email email, boolean domainsOnly) { + var stream = Stream.concat( Optional.ofNullable(email.to()).map(addrs -> Arrays.stream(addrs.toArray())).orElse(Stream.empty()), Stream.concat( Optional.ofNullable(email.cc()).map(addrs -> Arrays.stream(addrs.toArray())).orElse(Stream.empty()), Optional.ofNullable(email.bcc()).map(addrs -> Arrays.stream(addrs.toArray())).orElse(Stream.empty()) ) - ) - .map(InternetAddress::getAddress) - // Pull out only the domain of the email address, so foo@bar.com -> bar.com - .map(emailAddress -> emailAddress.substring(emailAddress.lastIndexOf('@') + 1)) - .collect(Collectors.toSet()); + ).map(InternetAddress::getAddress); + + if (domainsOnly) { + // Pull out only the domain of the email address, so foo@bar.com becomes bar.com + stream = stream.map(emailAddress -> emailAddress.substring(emailAddress.lastIndexOf('@') + 1)); + } + + return stream.collect(Collectors.toSet()); } // Visible for testing static boolean recipientDomainsInAllowList(Email email, Set allowedDomainSet) { - if (allowedDomainSet.size() == 0) { + if (allowedDomainSet.isEmpty()) { // Nothing is allowed return false; } @@ -262,12 +348,29 @@ static boolean recipientDomainsInAllowList(Email email, Set allowedDomai // Don't bother checking, because there is a wildcard all return true; } - final Set domains = getRecipientDomains(email); + final Set domains = getRecipients(email, true); final Predicate matchesAnyAllowedDomain = domain -> allowedDomainSet.stream() .anyMatch(allowedDomain -> Regex.simpleMatch(allowedDomain, domain, true)); return domains.stream().allMatch(matchesAnyAllowedDomain); } + // Visible for testing + static boolean recipientAddressInAllowList(Email email, Set allowedRecipientPatterns) { + if (allowedRecipientPatterns.isEmpty()) { + // Nothing is allowed + return false; + } + if (allowedRecipientPatterns.contains("*")) { + // Don't bother checking, because there is a wildcard all + return true; + } + + final Set recipients = getRecipients(email, false); + final Predicate matchesAnyAllowedRecipient = recipient -> allowedRecipientPatterns.stream() + .anyMatch(pattern -> Regex.simpleMatch(pattern, recipient, true)); + return recipients.stream().allMatch(matchesAnyAllowedRecipient); + } + private static EmailSent send(Email email, Authentication auth, Profile profile, Account account) throws MessagingException { assert account != null; try { @@ -304,6 +407,7 @@ private static List> getDynamicSettings() { return Arrays.asList( SETTING_DEFAULT_ACCOUNT, SETTING_DOMAIN_ALLOWLIST, + SETTING_RECIPIENT_ALLOW_PATTERNS, SETTING_PROFILE, SETTING_EMAIL_DEFAULTS, SETTING_SMTP_AUTH, diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestPutWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestPutWatchAction.java index 9dba72b1f64c3..0ed27a4073653 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestPutWatchAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestPutWatchAction.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.watcher.rest.action; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.lucene.uid.Versions; import 
org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; @@ -42,19 +43,24 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(final RestRequest request, NodeClient client) { - PutWatchRequest putWatchRequest = new PutWatchRequest(request.param("id"), request.content(), request.getXContentType()); + var content = request.content(); + PutWatchRequest putWatchRequest = new PutWatchRequest(request.param("id"), content, request.getXContentType()); putWatchRequest.setVersion(request.paramAsLong("version", Versions.MATCH_ANY)); putWatchRequest.setIfSeqNo(request.paramAsLong("if_seq_no", putWatchRequest.getIfSeqNo())); putWatchRequest.setIfPrimaryTerm(request.paramAsLong("if_primary_term", putWatchRequest.getIfPrimaryTerm())); putWatchRequest.setActive(request.paramAsBoolean("active", putWatchRequest.isActive())); - return channel -> client.execute(PutWatchAction.INSTANCE, putWatchRequest, new RestBuilderListener<>(channel) { - @Override - public RestResponse buildResponse(PutWatchResponse response, XContentBuilder builder) throws Exception { - response.toXContent(builder, request); - RestStatus status = response.isCreated() ? CREATED : OK; - return new RestResponse(status, builder); - } - }); + return channel -> client.execute( + PutWatchAction.INSTANCE, + putWatchRequest, + ActionListener.withRef(new RestBuilderListener<>(channel) { + @Override + public RestResponse buildResponse(PutWatchResponse response, XContentBuilder builder) throws Exception { + response.toXContent(builder, request); + RestStatus status = response.isCreated() ? CREATED : OK; + return new RestResponse(status, builder); + } + }, content) + ); } private static final Set FILTERED_FIELDS = Set.of( diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java index 63e9dae88de41..0db99af9b3fc2 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java @@ -8,6 +8,7 @@ import org.elasticsearch.xpack.core.scheduler.Cron; +import java.time.ZoneId; import java.util.Arrays; import java.util.Comparator; import java.util.Objects; @@ -17,6 +18,7 @@ public abstract class CronnableSchedule implements Schedule { private static final Comparator CRON_COMPARATOR = Comparator.comparing(Cron::expression); protected final Cron[] crons; + private ZoneId timeZone; CronnableSchedule(String... expressions) { this(crons(expressions)); @@ -28,6 +30,17 @@ private CronnableSchedule(Cron... 
crons) { Arrays.sort(crons, CRON_COMPARATOR); } + protected void setTimeZone(ZoneId timeZone) { + this.timeZone = timeZone; + for (Cron cron : crons) { + cron.setTimeZone(timeZone); + } + } + + public ZoneId getTimeZone() { + return timeZone; + } + @Override public long nextScheduledTimeAfter(long startTime, long time) { assert time >= startTime; @@ -45,21 +58,22 @@ public Cron[] crons() { return crons; } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CronnableSchedule that = (CronnableSchedule) o; + return Objects.deepEquals(crons, that.crons) && Objects.equals(timeZone, that.timeZone); + } + @Override public int hashCode() { - return Objects.hash((Object[]) crons); + return Objects.hash(Arrays.hashCode(crons), timeZone); } @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null || getClass() != obj.getClass()) { - return false; - } - final CronnableSchedule other = (CronnableSchedule) obj; - return Objects.deepEquals(this.crons, other.crons); + public String toString() { + return "CronnableSchedule{" + "crons=" + Arrays.toString(crons) + ", timeZone=" + timeZone + '}'; } static Cron[] crons(String... expressions) { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleRegistry.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleRegistry.java index 31cf46f8abaac..5d2259db71f77 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleRegistry.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleRegistry.java @@ -8,8 +8,11 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.TimezoneUtils; import java.io.IOException; +import java.time.DateTimeException; +import java.time.ZoneId; import java.util.HashMap; import java.util.Map; import java.util.Set; @@ -29,9 +32,15 @@ public Schedule parse(String context, XContentParser parser) throws IOException String type = null; XContentParser.Token token; Schedule schedule = null; + ZoneId timeZone = null; // Default to UTC while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { - type = parser.currentName(); + var fieldName = parser.currentName(); + if (fieldName.equals(ScheduleTrigger.TIMEZONE_FIELD)) { + timeZone = parseTimezone(parser); + } else { + type = parser.currentName(); + } } else if (type != null) { schedule = parse(context, type, parser); } else { @@ -44,9 +53,38 @@ public Schedule parse(String context, XContentParser parser) throws IOException if (schedule == null) { throw new ElasticsearchParseException("could not parse schedule. expected a schedule type field, but no fields were found"); } + + if (timeZone != null && schedule instanceof CronnableSchedule cronnableSchedule) { + cronnableSchedule.setTimeZone(timeZone); + } else if (timeZone != null) { + throw new ElasticsearchParseException( + "could not parse schedule. 
Timezone is not supported for schedule type [{}]", + schedule.type() + ); + } + return schedule; } + private static ZoneId parseTimezone(XContentParser parser) throws IOException { + ZoneId timeZone; + XContentParser.Token token = parser.nextToken(); + if (token == XContentParser.Token.VALUE_STRING) { + String text = parser.text(); + try { + timeZone = TimezoneUtils.parse(text); + } catch (DateTimeException e) { + throw new ElasticsearchParseException("could not parse schedule. invalid timezone [{}]", e, text); + } + } else { + throw new ElasticsearchParseException( + "could not parse schedule. expected a string value for timezone, but found [{}] instead", + token + ); + } + return timeZone; + } + public Schedule parse(String context, String type, XContentParser parser) throws IOException { Schedule.Parser scheduleParser = parsers.get(type); if (scheduleParser == null) { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTrigger.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTrigger.java index 4a67841e6c88e..cc6ec8f5aaa57 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTrigger.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTrigger.java @@ -14,6 +14,7 @@ public class ScheduleTrigger implements Trigger { public static final String TYPE = "schedule"; + public static final String TIMEZONE_FIELD = "timezone"; private final Schedule schedule; @@ -49,7 +50,13 @@ public int hashCode() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.startObject().field(schedule.type(), schedule, params).endObject(); + builder.startObject(); + if (schedule instanceof CronnableSchedule cronnableSchedule && cronnableSchedule.getTimeZone() != null) { + builder.field(TIMEZONE_FIELD, cronnableSchedule.getTimeZone().getId()); + } + + builder.field(schedule.type(), schedule, params); + return builder.endObject(); } public static Builder builder(Schedule schedule) { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/TimezoneUtils.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/TimezoneUtils.java new file mode 100644 index 0000000000000..c77fdda803bec --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/TimezoneUtils.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.watcher.trigger.schedule.support; + +import java.time.DateTimeException; +import java.time.ZoneId; +import java.util.Locale; +import java.util.Map; + +import static java.util.stream.Collectors.toMap; + +/** + * Utility class for dealing with Timezone related operations. + */ +public class TimezoneUtils { + + private static final Map caseInsensitiveTZLookup; + + static { + caseInsensitiveTZLookup = ZoneId.getAvailableZoneIds() + .stream() + .collect(toMap(zoneId -> zoneId.toLowerCase(Locale.ROOT), ZoneId::of)); + } + + /** + * Parses a timezone string into a {@link ZoneId} object. 
The timezone string can be a valid timezone ID, or a + * timezone offset string; parsing is case-insensitive. + * + * @param timezoneString The timezone string to parse + * @return The parsed {@link ZoneId} object + * @throws DateTimeException If the timezone string is not a valid timezone ID or offset + */ + public static ZoneId parse(String timezoneString) throws DateTimeException { + try { + return ZoneId.of(timezoneString); + } catch (DateTimeException e) { + ZoneId timeZone = caseInsensitiveTZLookup.get(timezoneString.toLowerCase(Locale.ROOT)); + if (timeZone != null) { + return timeZone; + } + try { + return ZoneId.of(timezoneString.toUpperCase(Locale.ROOT)); + } catch (DateTimeException ignored) { + // ignore + } + throw e; + } + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailServiceTests.java index a0ce8b18d8a96..4a668d0f9817a 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailServiceTests.java @@ -69,6 +69,31 @@ public void testSend() throws Exception { assertThat(sent.account(), is("account1")); } + public void testDomainAndRecipientAllowCantBeSetAtSameTime() { + Settings settings = Settings.builder() + .putList("xpack.notification.email.account.domain_allowlist", "bar.com") + .putList("xpack.notification.email.recipient_allowlist", "*-user@potato.com") + .build(); + + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> new EmailService( + settings, + null, + mock(SSLService.class), + new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings())) + ) + ); + + assertThat( + e.getMessage(), + containsString( + "Cannot set both [xpack.notification.email.recipient_allowlist] and " + + "[xpack.notification.email.account.domain_allowlist] to a non [\"*\"] value at the same time."
+ ) + ); + } + public void testAccountSmtpPropertyConfiguration() { Settings settings = Settings.builder() .put("xpack.notification.email.account.account1.smtp.host", "localhost") @@ -140,7 +165,7 @@ public void testExtractDomains() throws Exception { Collections.emptyMap() ); assertThat( - EmailService.getRecipientDomains(email), + EmailService.getRecipients(email, true), containsInAnyOrder("bar.com", "eggplant.com", "example.com", "another.com", "bcc.com") ); @@ -158,7 +183,7 @@ public void testExtractDomains() throws Exception { "htmlbody", Collections.emptyMap() ); - assertThat(EmailService.getRecipientDomains(email), containsInAnyOrder("bar.com", "eggplant.com", "example.com")); + assertThat(EmailService.getRecipients(email, true), containsInAnyOrder("bar.com", "eggplant.com", "example.com")); } public void testAllowedDomain() throws Exception { @@ -322,6 +347,264 @@ public void testChangeDomainAllowListSetting() throws UnsupportedEncodingExcepti assertThat(e2.getMessage(), containsString("port out of range")); } + public void testRecipientAddressInAllowList_EmptyAllowedPatterns() throws UnsupportedEncodingException { + Email email = createTestEmail("foo@bar.com", "baz@potato.com"); + Set allowedPatterns = Set.of(); + assertThat(EmailService.recipientAddressInAllowList(email, allowedPatterns), is(false)); + } + + public void testRecipientAddressInAllowList_WildcardPattern() throws UnsupportedEncodingException { + Email email = createTestEmail("foo@bar.com", "baz@potato.com"); + Set allowedPatterns = Set.of("*"); + assertThat(EmailService.recipientAddressInAllowList(email, allowedPatterns), is(true)); + } + + public void testRecipientAddressInAllowList_SpecificPattern() throws UnsupportedEncodingException { + Email email = createTestEmail("foo@bar.com", "baz@potato.com"); + Set allowedPatterns = Set.of("foo@bar.com"); + assertThat(EmailService.recipientAddressInAllowList(email, allowedPatterns), is(false)); + } + + public void testRecipientAddressInAllowList_MultiplePatterns() throws UnsupportedEncodingException { + Email email = createTestEmail("foo@bar.com", "baz@potato.com"); + Set allowedPatterns = Set.of("foo@bar.com", "baz@potato.com"); + assertThat(EmailService.recipientAddressInAllowList(email, allowedPatterns), is(true)); + } + + public void testRecipientAddressInAllowList_MixedCasePatterns() throws UnsupportedEncodingException { + Email email = createTestEmail("foo@bar.com", "baz@potato.com"); + Set allowedPatterns = Set.of("FOO@BAR.COM", "BAZ@POTATO.COM"); + assertThat(EmailService.recipientAddressInAllowList(email, allowedPatterns), is(true)); + } + + public void testRecipientAddressInAllowList_PartialWildcardPrefixPattern() throws UnsupportedEncodingException { + Email email = createTestEmail("foo@bar.com", "baz@potato.com"); + Set allowedPatterns = Set.of("foo@*", "baz@*"); + assertThat(EmailService.recipientAddressInAllowList(email, allowedPatterns), is(true)); + } + + public void testRecipientAddressInAllowList_PartialWildcardSuffixPattern() throws UnsupportedEncodingException { + Email email = createTestEmail("foo@bar.com", "baz@potato.com"); + Set allowedPatterns = Set.of("*@bar.com", "*@potato.com"); + assertThat(EmailService.recipientAddressInAllowList(email, allowedPatterns), is(true)); + } + + public void testRecipientAddressInAllowList_DisallowedCCAddressesFails() throws UnsupportedEncodingException { + Email email = new Email( + "id", + new Email.Address("sender@domain.com", "Sender"), + createAddressList("foo@bar.com"), + randomFrom(Email.Priority.values()), + 
ZonedDateTime.now(), + createAddressList("foo@bar.com"), + createAddressList("cc@allowed.com", "cc@notallowed.com"), + null, + "subject", + "body", + "htmlbody", + Collections.emptyMap() + ); + Set allowedPatterns = Set.of("foo@bar.com", "cc@allowed.com"); + assertThat(EmailService.recipientAddressInAllowList(email, allowedPatterns), is(false)); + } + + public void testRecipientAddressInAllowList_DisallowedBCCAddressesFails() throws UnsupportedEncodingException { + Email email = new Email( + "id", + new Email.Address("sender@domain.com", "Sender"), + createAddressList("foo@bar.com"), + randomFrom(Email.Priority.values()), + ZonedDateTime.now(), + createAddressList("foo@bar.com"), + null, + createAddressList("bcc@allowed.com", "bcc@notallowed.com"), + "subject", + "body", + "htmlbody", + Collections.emptyMap() + ); + Set allowedPatterns = Set.of("foo@bar.com", "bcc@allowed.com"); + assertThat(EmailService.recipientAddressInAllowList(email, allowedPatterns), is(false)); + } + + public void testAllowedRecipient() throws Exception { + Email email = new Email( + "id", + new Email.Address("foo@bar.com", "Mr. Foo Man"), + createAddressList("foo@bar.com", "baz@potato.com"), + randomFrom(Email.Priority.values()), + ZonedDateTime.now(), + createAddressList("foo@bar.com"), + null, + null, + "subject", + "body", + "htmlbody", + Collections.emptyMap() + ); + assertTrue(EmailService.recipientAddressInAllowList(email, Set.of("*"))); + assertFalse(EmailService.recipientAddressInAllowList(email, Set.of())); + assertFalse(EmailService.recipientAddressInAllowList(email, Set.of(""))); + assertTrue(EmailService.recipientAddressInAllowList(email, Set.of("foo@other.com", "*o@bar.com"))); + assertTrue(EmailService.recipientAddressInAllowList(email, Set.of("buzz@other.com", "*.com"))); + assertTrue(EmailService.recipientAddressInAllowList(email, Set.of("*.CoM"))); + + // Invalid email in CC doesn't blow up + email = new Email( + "id", + new Email.Address("foo@bar.com", "Mr. Foo Man"), + createAddressList("foo@bar.com", "baz@potato.com"), + randomFrom(Email.Priority.values()), + ZonedDateTime.now(), + createAddressList("foo@bar.com"), + createAddressList("badEmail"), + null, + "subject", + "body", + "htmlbody", + Collections.emptyMap() + ); + assertFalse(EmailService.recipientAddressInAllowList(email, Set.of("*@other.com", "*iii@bar.com"))); + + // Check CC + email = new Email( + "id", + new Email.Address("foo@bar.com", "Mr. Foo Man"), + createAddressList("foo@bar.com", "baz@potato.com"), + randomFrom(Email.Priority.values()), + ZonedDateTime.now(), + createAddressList("foo@bar.com"), + createAddressList("thing@other.com"), + null, + "subject", + "body", + "htmlbody", + Collections.emptyMap() + ); + assertTrue(EmailService.recipientAddressInAllowList(email, Set.of("*@other.com", "*@bar.com"))); + assertFalse(EmailService.recipientAddressInAllowList(email, Set.of("*oo@bar.com"))); + + // Check BCC + email = new Email( + "id", + new Email.Address("foo@bar.com", "Mr. 
Foo Man"), + createAddressList("foo@bar.com", "baz@potato.com"), + randomFrom(Email.Priority.values()), + ZonedDateTime.now(), + createAddressList("foo@bar.com"), + null, + createAddressList("thing@other.com"), + "subject", + "body", + "htmlbody", + Collections.emptyMap() + ); + assertTrue(EmailService.recipientAddressInAllowList(email, Set.of("*@other.com", "*@bar.com"))); + assertFalse(EmailService.recipientAddressInAllowList(email, Set.of("*oo@bar.com"))); + } + + public void testSendEmailWithRecipientNotInAllowList() throws Exception { + service.updateAllowedRecipientPatterns(Collections.singletonList(randomFrom("*@bar.*", "*@bar.com", "*b*"))); + Email email = new Email( + "id", + new Email.Address("foo@bar.com", "Mr. Foo Man"), + createAddressList("foo@bar.com", "baz@potato.com"), + randomFrom(Email.Priority.values()), + ZonedDateTime.now(), + createAddressList("foo@bar.com", "non-whitelisted@invalid.com"), + null, + null, + "subject", + "body", + "htmlbody", + Collections.emptyMap() + ); + when(account.name()).thenReturn("account1"); + Authentication auth = new Authentication("user", new Secret("passwd".toCharArray())); + Profile profile = randomFrom(Profile.values()); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> service.send(email, auth, profile, "account1")); + assertThat( + e.getMessage(), + containsString( + "failed to send email with subject [subject] and recipients [non-whitelisted@invalid.com, foo@bar.com], " + + "one or more recipients is not specified in the domain allow list setting " + + "[xpack.notification.email.recipient_allowlist]." + ) + ); + } + + public void testChangeRecipientAllowListSetting() throws UnsupportedEncodingException, MessagingException { + Settings settings = Settings.builder() + .put("xpack.notification.email.account.account1.foo", "bar") + // Setting a random SMTP server name and an invalid port so that sending emails is guaranteed to fail: + .put("xpack.notification.email.account.account1.smtp.host", randomAlphaOfLength(10)) + .put("xpack.notification.email.account.account1.smtp.port", -100) + .putList("xpack.notification.email.recipient_allowlist", "*oo@bar.com") + .build(); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings())); + EmailService emailService = new EmailService(settings, null, mock(SSLService.class), clusterSettings); + Email email = new Email( + "id", + new Email.Address("foo@bar.com", "Mr. Foo Man"), + createAddressList("foo@bar.com", "baz@potato.com"), + randomFrom(Email.Priority.values()), + ZonedDateTime.now(), + createAddressList("foo@bar.com", "non-whitelisted@invalid.com"), + null, + null, + "subject", + "body", + "htmlbody", + Collections.emptyMap() + ); + when(account.name()).thenReturn("account1"); + Authentication auth = new Authentication("user", new Secret("passwd".toCharArray())); + Profile profile = randomFrom(Profile.values()); + + // This send will fail because one of the recipients ("non-whitelisted@invalid.com") is in a domain that is not in the allowed list + IllegalArgumentException e1 = expectThrows( + IllegalArgumentException.class, + () -> emailService.send(email, auth, profile, "account1") + ); + assertThat( + e1.getMessage(), + containsString( + "failed to send email with subject [subject] and recipients [non-whitelisted@invalid.com, foo@bar.com], " + + "one or more recipients is not specified in the domain allow list setting " + + "[xpack.notification.email.recipient_allowlist]." 
+ ) + ); + + // Now dynamically add "invalid.com" to the list of allowed domains: + Settings newSettings = Settings.builder() + .putList("xpack.notification.email.recipient_allowlist", "*@bar.com", "*@invalid.com") + .build(); + clusterSettings.applySettings(newSettings); + // Still expect an exception because we're not actually sending the email, but it's no longer because the domain isn't allowed: + IllegalArgumentException e2 = expectThrows( + IllegalArgumentException.class, + () -> emailService.send(email, auth, profile, "account1") + ); + assertThat(e2.getMessage(), containsString("port out of range")); + } + + private Email createTestEmail(String... recipients) throws UnsupportedEncodingException { + return new Email( + "id", + new Email.Address("sender@domain.com", "Sender"), + createAddressList(recipients), + randomFrom(Email.Priority.values()), + ZonedDateTime.now(), + createAddressList(recipients), + null, + null, + "subject", + "body", + "htmlbody", + Collections.emptyMap() + ); + } + private static Email.AddressList createAddressList(String... emails) throws UnsupportedEncodingException { List addresses = new ArrayList<>(); for (String email : emails) { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java index 1691a464d8061..59dc1db88e991 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java @@ -16,11 +16,13 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.env.Environment; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.node.InternalSettingsPreparer; import org.elasticsearch.node.MockNode; import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.PluginsLoader; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; @@ -96,17 +98,20 @@ public static void main(String[] args) throws Exception { ); System.out.println("and heap_max=" + JvmInfo.jvmInfo().getMem().getHeapMax()); + Environment internalNodeEnv = InternalSettingsPreparer.prepareEnvironment( + Settings.builder().put(SETTINGS).put("node.data", false).build(), + emptyMap(), + null, + () -> { + throw new IllegalArgumentException("settings must have [node.name]"); + } + ); + // First clean everything and index the watcher (but not via put alert api!) 
try ( Node node = new Node( - InternalSettingsPreparer.prepareEnvironment( - Settings.builder().put(SETTINGS).put("node.data", false).build(), - emptyMap(), - null, - () -> { - throw new IllegalArgumentException("settings must have [node.name]"); - } - ) + internalNodeEnv, + PluginsLoader.createPluginsLoader(internalNodeEnv.modulesFile(), internalNodeEnv.pluginsFile()) ).start() ) { final Client client = node.client(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleRegistryTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleRegistryTests.java index 7fc4739c342f1..aa39701d207c3 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleRegistryTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleRegistryTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.Before; +import java.time.ZoneId; import java.util.HashSet; import java.util.Set; @@ -49,15 +50,23 @@ public void testParserInterval() throws Exception { } public void testParseCron() throws Exception { - Object cron = randomBoolean() ? Schedules.cron("* 0/5 * * * ?") : Schedules.cron("* 0/2 * * * ?", "* 0/3 * * * ?", "* 0/5 * * * ?"); - XContentBuilder builder = jsonBuilder().startObject().field(CronSchedule.TYPE, cron).endObject(); + var cron = randomBoolean() ? Schedules.cron("* 0/5 * * * ?") : Schedules.cron("* 0/2 * * * ?", "* 0/3 * * * ?", "* 0/5 * * * ?"); + ZoneId timeZone = null; + XContentBuilder builder = jsonBuilder().startObject().field(CronSchedule.TYPE, cron); + if (randomBoolean()) { + timeZone = randomTimeZone().toZoneId(); + cron.setTimeZone(timeZone); + builder.field(ScheduleTrigger.TIMEZONE_FIELD, timeZone.getId()); + } + builder.endObject(); BytesReference bytes = BytesReference.bytes(builder); XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); parser.nextToken(); - Schedule schedule = registry.parse("ctx", parser); + CronnableSchedule schedule = (CronnableSchedule) registry.parse("ctx", parser); assertThat(schedule, notNullValue()); assertThat(schedule, instanceOf(CronSchedule.class)); assertThat(schedule, is(cron)); + assertThat(schedule.getTimeZone(), equalTo(timeZone)); } public void testParseHourly() throws Exception { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/TimezoneUtilsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/TimezoneUtilsTests.java new file mode 100644 index 0000000000000..aa797ec610eca --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/TimezoneUtilsTests.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.watcher.trigger.schedule.support; + +import org.elasticsearch.test.ESTestCase; + +import java.time.ZoneId; +import java.util.Locale; + +import static org.hamcrest.Matchers.equalTo; + +public class TimezoneUtilsTests extends ESTestCase { + + public void testExpectedFormatParsing() { + assertThat(TimezoneUtils.parse("Europe/London").getId(), equalTo("Europe/London")); + assertThat(TimezoneUtils.parse("+1").getId(), equalTo("+01:00")); + assertThat(TimezoneUtils.parse("GMT+01:00").getId(), equalTo("GMT+01:00")); + } + + public void testParsingIsCaseInsensitive() { + ZoneId timeZone = randomTimeZone().toZoneId(); + assertThat(TimezoneUtils.parse(timeZone.getId()), equalTo(timeZone)); + assertThat(TimezoneUtils.parse(timeZone.getId().toLowerCase(Locale.ROOT)), equalTo(timeZone)); + assertThat(TimezoneUtils.parse(timeZone.getId().toUpperCase(Locale.ROOT)), equalTo(timeZone)); + } + + public void testParsingOffsets() { + ZoneId timeZone = ZoneId.of("GMT+01:00"); + assertThat(TimezoneUtils.parse("GMT+01:00"), equalTo(timeZone)); + assertThat(TimezoneUtils.parse("gmt+01:00"), equalTo(timeZone)); + assertThat(TimezoneUtils.parse("GMT+1"), equalTo(timeZone)); + + assertThat(TimezoneUtils.parse("+1"), equalTo(ZoneId.of("+01:00"))); + } +} diff --git a/x-pack/plugin/wildcard/build.gradle b/x-pack/plugin/wildcard/build.gradle index 2bcf0db057aa5..1a4f133402582 100644 --- a/x-pack/plugin/wildcard/build.gradle +++ b/x-pack/plugin/wildcard/build.gradle @@ -1,7 +1,12 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ apply plugin: 'elasticsearch.internal-es-plugin' -apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' esplugin { name 'wildcard' @@ -19,9 +24,3 @@ dependencies { compileOnly project(path: xpackModule('core')) testImplementation(testArtifact(project(xpackModule('core')))) } - -if (BuildParams.isSnapshotBuild() == false) { - tasks.named("test").configure { - systemProperty 'es.index_mode_feature_flag_registered', 'true' - } -} diff --git a/x-pack/plugin/wildcard/src/yamlRestTest/java/org/elasticsearch/xpack/wildcard/WildcardClientYamlTestSuiteIT.java b/x-pack/plugin/wildcard/src/yamlRestTest/java/org/elasticsearch/xpack/wildcard/WildcardClientYamlTestSuiteIT.java index 61eb0c8b0de3e..c9ec7d71b1805 100644 --- a/x-pack/plugin/wildcard/src/yamlRestTest/java/org/elasticsearch/xpack/wildcard/WildcardClientYamlTestSuiteIT.java +++ b/x-pack/plugin/wildcard/src/yamlRestTest/java/org/elasticsearch/xpack/wildcard/WildcardClientYamlTestSuiteIT.java @@ -10,8 +10,10 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; /** Runs yaml rest tests */ public class WildcardClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @@ -24,4 +26,12 @@ public WildcardClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testC public static Iterable<Object[]> parameters() throws Exception { return ESClientYamlSuiteTestCase.createParameters(); } + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local().module("wildcard").build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } } diff --git a/x-pack/qa/core-rest-tests-with-security/build.gradle b/x-pack/qa/core-rest-tests-with-security/build.gradle index 0b8e459ed231b..65f2282014dc4 100644 --- a/x-pack/qa/core-rest-tests-with-security/build.gradle +++ b/x-pack/qa/core-rest-tests-with-security/build.gradle @@ -1,5 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + apply plugin: 'elasticsearch.internal-yaml-rest-test' -import org.elasticsearch.gradle.internal.info.BuildParams dependencies { testImplementation project(':x-pack:qa') @@ -27,7 +33,7 @@ tasks.named("yamlRestTest").configure { 'index/10_with_id/Index with ID', 'indices.get_alias/10_basic/Get alias against closed indices' ]; - if (BuildParams.isSnapshotBuild() == false) { + if (buildParams.isSnapshotBuild() == false) { blacklist += [ 'synonyms_privileges/10_synonyms_with_privileges/*', 'synonyms_privileges/20_synonyms_no_privileges/*' diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index 7248d1b0a6bfb..ee0955c6db082 100644 --- a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -1,4 +1,10 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-java-rest-test' @@ -15,7 +21,7 @@ dependencies { } -BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> +buildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index a56ddaabe8280..fe4c1c20c69c4 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -23,13 +23,10 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.search.RestSearchAction; -import org.elasticsearch.test.MapMatcher; import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xcontent.ObjectPath; import org.elasticsearch.xcontent.XContentBuilder; @@ -53,8 +50,6 @@ import java.util.stream.Collectors; import static org.elasticsearch.core.TimeValue.timeValueSeconds; -import static org.elasticsearch.test.MapMatcher.assertMap; -import static org.elasticsearch.test.MapMatcher.matchesMap; import static org.elasticsearch.upgrades.FullClusterRestartIT.assertNumHits; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; @@ -295,10 +290,6 @@ public void testWatcherWithApiKey() throws Exception { } public void testServiceAccountApiKey() throws IOException { - @UpdateForV9(owner = UpdateForV9.Owner.SECURITY) - var originalClusterSupportsServiceAccounts = oldClusterHasFeature(RestTestLegacyFeatures.SERVICE_ACCOUNTS_SUPPORTED); - assumeTrue("no service accounts in versions before 7.13", originalClusterSupportsServiceAccounts); - if (isRunningAgainstOldCluster()) { final Request createServiceTokenRequest = new Request("POST", "/_security/service/elastic/fleet-server/credential/token"); final Response createServiceTokenResponse = client().performRequest(createServiceTokenRequest); @@ -484,10 +475,6 @@ public void testRollupAfterRestart() throws Exception { } public void testTransformLegacyTemplateCleanup() throws Exception { - @UpdateForV9(owner = UpdateForV9.Owner.MACHINE_LEARNING) - var originalClusterSupportsTransform = oldClusterHasFeature(RestTestLegacyFeatures.TRANSFORM_SUPPORTED); - assumeTrue("Before 7.2 transforms didn't exist", originalClusterSupportsTransform); - if (isRunningAgainstOldCluster()) { // create the source index @@ -562,9 +549,6 @@ public void testTransformLegacyTemplateCleanup() throws Exception { } public void testSlmPolicyAndStats() throws IOException { - @UpdateForV9(owner = 
UpdateForV9.Owner.DATA_MANAGEMENT) - var originalClusterSupportsSlm = oldClusterHasFeature(RestTestLegacyFeatures.SLM_SUPPORTED); - SnapshotLifecyclePolicy slmPolicy = new SnapshotLifecyclePolicy( "test-policy", "test-policy", @@ -573,7 +557,7 @@ public void testSlmPolicyAndStats() throws IOException { Collections.singletonMap("indices", Collections.singletonList("*")), null ); - if (isRunningAgainstOldCluster() && originalClusterSupportsSlm) { + if (isRunningAgainstOldCluster()) { Request createRepoRequest = new Request("PUT", "_snapshot/test-repo"); String repoCreateJson = "{" + " \"type\": \"fs\"," + " \"settings\": {" + " \"location\": \"test-repo\"" + " }" + "}"; createRepoRequest.setJsonEntity(repoCreateJson); @@ -587,7 +571,7 @@ public void testSlmPolicyAndStats() throws IOException { client().performRequest(createSlmPolicyRequest); } - if (isRunningAgainstOldCluster() == false && originalClusterSupportsSlm) { + if (isRunningAgainstOldCluster() == false) { Request getSlmPolicyRequest = new Request("GET", "_slm/policy/test-policy"); Response response = client().performRequest(getSlmPolicyRequest); Map<String, Object> responseMap = entityAsMap(response); @@ -914,14 +898,6 @@ private void waitForRollUpJob(final String rollupJob, final Matcher<?> expectedS @SuppressWarnings("unchecked") public void testDataStreams() throws Exception { - - @UpdateForV9(owner = UpdateForV9.Owner.DATA_MANAGEMENT) - var originalClusterSupportsDataStreams = oldClusterHasFeature(RestTestLegacyFeatures.DATA_STREAMS_SUPPORTED); - - @UpdateForV9(owner = UpdateForV9.Owner.DATA_MANAGEMENT) - var originalClusterDataStreamHasDateInIndexName = oldClusterHasFeature(RestTestLegacyFeatures.NEW_DATA_STREAMS_INDEX_NAME_FORMAT); - - assumeTrue("no data streams in versions before 7.9.0", originalClusterSupportsDataStreams); if (isRunningAgainstOldCluster()) { createComposableTemplate(client(), "dst", "ds"); @@ -958,89 +934,10 @@ public void testDataStreams() throws Exception { List<Map<String, Object>> indices = (List<Map<String, Object>>) ds.get("indices"); assertEquals("ds", ds.get("name")); assertEquals(1, indices.size()); - assertEquals( - DataStreamTestHelper.getLegacyDefaultBackingIndexName("ds", 1, timestamp, originalClusterDataStreamHasDateInIndexName), - indices.get(0).get("index_name") - ); + assertEquals(DataStreamTestHelper.getLegacyDefaultBackingIndexName("ds", 1, timestamp), indices.get(0).get("index_name")); assertNumHits("ds", 1, 1); } - /** - * Tests that a single document survives. Super basic smoke test.
- */ - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_FOUNDATIONS) // Can be removed - public void testDisableFieldNameField() throws IOException { - assumeFalse( - "can only disable field names field before 8.0", - oldClusterHasFeature(RestTestLegacyFeatures.DISABLE_FIELD_NAMES_FIELD_REMOVED) - ); - - String docLocation = "/nofnf/_doc/1"; - String doc = """ - { - "dv": "test", - "no_dv": "test" - }"""; - - if (isRunningAgainstOldCluster()) { - Request createIndex = new Request("PUT", "/nofnf"); - createIndex.setJsonEntity(""" - { - "settings": { - "index": { - "number_of_replicas": 1 - } - }, - "mappings": { - "_field_names": { "enabled": false }, - "properties": { - "dv": { "type": "keyword" }, - "no_dv": { "type": "keyword", "doc_values": false } - } - } - }"""); - createIndex.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(fieldNamesFieldOk())); - client().performRequest(createIndex); - - Request createDoc = new Request("PUT", docLocation); - createDoc.addParameter("refresh", "true"); - createDoc.setJsonEntity(doc); - client().performRequest(createDoc); - } - - Request getRequest = new Request("GET", docLocation); - assertThat(toStr(client().performRequest(getRequest)), containsString(doc)); - - if (isRunningAgainstOldCluster() == false) { - Request esql = new Request("POST", "_query"); - esql.setJsonEntity(""" - { - "query": "FROM nofnf | LIMIT 1" - }"""); - // {"columns":[{"name":"dv","type":"keyword"},{"name":"no_dv","type":"keyword"}],"values":[["test",null]]} - try { - Map<String, Object> result = entityAsMap(client().performRequest(esql)); - MapMatcher mapMatcher = matchesMap(); - if (result.get("took") != null) { - mapMatcher = mapMatcher.entry("took", ((Integer) result.get("took")).intValue()); - } - assertMap( - result, - mapMatcher.entry( - "columns", - List.of(Map.of("name", "dv", "type", "keyword"), Map.of("name", "no_dv", "type", "keyword")) - ).entry("values", List.of(List.of("test", "test"))) - ); - } catch (ResponseException e) { - logger.error( - "failed to query index without field name field. Existing indices:\n{}", - EntityUtils.toString(client().performRequest(new Request("GET", "_cat/indices")).getEntity()) - ); - throw e; - } - } - } - /** * Ignore the warning about the {@code _field_names} field. We intentionally * turn that field off sometimes.
And other times old versions spuriously diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java index c825de31a7f6e..91820299da8a5 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlConfigIndexMappingsFullClusterRestartIT.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xpack.test.rest.IndexMappingTemplateAsserter; import org.elasticsearch.xpack.test.rest.XPackRestTestConstants; @@ -48,11 +47,7 @@ protected Settings restClientSettings() { public void waitForMlTemplates() throws Exception { // We shouldn't wait for ML templates during the upgrade - production won't if (isRunningAgainstOldCluster()) { - XPackRestTestHelper.waitForTemplates( - client(), - XPackRestTestConstants.ML_POST_V7120_TEMPLATES, - clusterHasFeature(RestTestLegacyFeatures.COMPONENT_TEMPLATE_SUPPORTED) - ); + XPackRestTestHelper.waitForTemplates(client(), XPackRestTestConstants.ML_POST_V7120_TEMPLATES); } } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java index 7dc0a2f48bbc9..a83ad5b4f8da4 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -64,11 +63,7 @@ protected Settings restClientSettings() { public void waitForMlTemplates() throws Exception { // We shouldn't wait for ML templates during the upgrade - production won't if (isRunningAgainstOldCluster()) { - XPackRestTestHelper.waitForTemplates( - client(), - XPackRestTestConstants.ML_POST_V7120_TEMPLATES, - clusterHasFeature(RestTestLegacyFeatures.COMPONENT_TEMPLATE_SUPPORTED) - ); + XPackRestTestHelper.waitForTemplates(client(), XPackRestTestConstants.ML_POST_V7120_TEMPLATES); } } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java index 0b15e98f201a0..74f62fac26488 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java +++ 
b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java @@ -18,7 +18,6 @@ import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; @@ -58,11 +57,7 @@ protected Settings restClientSettings() { public void waitForMlTemplates() throws Exception { // We shouldn't wait for ML templates during the upgrade - production won't if (isRunningAgainstOldCluster()) { - XPackRestTestHelper.waitForTemplates( - client(), - XPackRestTestConstants.ML_POST_V7120_TEMPLATES, - clusterHasFeature(RestTestLegacyFeatures.COMPONENT_TEMPLATE_SUPPORTED) - ); + XPackRestTestHelper.waitForTemplates(client(), XPackRestTestConstants.ML_POST_V7120_TEMPLATES); } } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java deleted file mode 100644 index fee6910fcf6c0..0000000000000 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/WatcherMappingUpdateIT.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.restart; - -import com.carrotsearch.randomizedtesting.annotations.Name; - -import org.apache.http.util.EntityUtils; -import org.elasticsearch.Build; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; -import org.elasticsearch.client.Response; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.core.UpdateForV9; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; -import org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus; -import org.junit.Before; - -import java.nio.charset.StandardCharsets; -import java.util.Base64; -import java.util.concurrent.TimeUnit; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.not; - -@UpdateForV9(owner = UpdateForV9.Owner.DATA_MANAGEMENT) -// Remove the whole test suite (superseded by SystemIndexMappingUpdateServiceIT#testSystemIndexManagerUpgradesMappings) -public class WatcherMappingUpdateIT extends AbstractXpackFullClusterRestartTestCase { - - public WatcherMappingUpdateIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { - super(upgradeStatus); - } - - @Before - public void setup() { - // This test is superseded by SystemIndexMappingUpdateServiceIT#testSystemIndexManagerUpgradesMappings for newer versions - assumeFalse( - "Starting from 8.11, the mappings upgrade service uses mappings versions instead of node versions", - clusterHasFeature(RestTestLegacyFeatures.MAPPINGS_UPGRADE_SERVICE_USES_MAPPINGS_VERSION) - ); - } - - @Override - protected Settings restClientSettings() { - String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); - return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); - } - - public void testMappingsAreUpdated() throws Exception { - if (isRunningAgainstOldCluster()) { - // post a watch - Request putWatchRequest = new Request("PUT", "_watcher/watch/log_error_watch"); - putWatchRequest.setJsonEntity(""" - { - "trigger" : { - "schedule" : { "interval" : "10s" } - }, - "input" : { - "search" : { - "request" : { - "indices" : [ "logs" ], - "body" : { - "query" : { - "match" : { "message": "error" } - } - } - } - } - } - } - """); - client().performRequest(putWatchRequest); - - assertMappingVersion(".watches", getOldClusterVersion()); - } else { - assertMappingVersion(".watches", Build.current().version()); - } - } - - private void assertMappingVersion(String index, String clusterVersion) throws Exception { - assertBusy(() -> { - Request mappingRequest = new Request("GET", index + "/_mappings"); - mappingRequest.setOptions(getWarningHandlerOptions(index)); - Response response = client().performRequest(mappingRequest); - String responseBody = EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8); - assertThat(responseBody, containsString("\"version\":\"" + clusterVersion + "\"")); - }, 60L, TimeUnit.SECONDS); - } - - private void assertNoMappingVersion(String index) throws Exception { - assertBusy(() -> { - Request mappingRequest = new Request("GET", index + "/_mappings"); - assert isRunningAgainstOldCluster(); - mappingRequest.setOptions(getWarningHandlerOptions(index)); - Response response = client().performRequest(mappingRequest); - String responseBody = EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8); - assertThat(responseBody, not(containsString("\"version\":\""))); 
- }, 60L, TimeUnit.SECONDS); - } - - private RequestOptions.Builder getWarningHandlerOptions(String index) { - return RequestOptions.DEFAULT.toBuilder() - .setWarningsHandler(w -> w.size() > 0 && w.contains(getWatcherSystemIndexWarning(index)) == false); - } - - private String getWatcherSystemIndexWarning(String index) { - return "this request accesses system indices: [" - + index - + "], but in a future major version, " - + "direct access to system indices will be prevented by default"; - } -} diff --git a/x-pack/qa/mixed-tier-cluster/build.gradle b/x-pack/qa/mixed-tier-cluster/build.gradle index bf05be45e18a0..bee28c47dc867 100644 --- a/x-pack/qa/mixed-tier-cluster/build.gradle +++ b/x-pack/qa/mixed-tier-cluster/build.gradle @@ -1,16 +1,22 @@ -apply plugin: 'elasticsearch.legacy-java-rest-test' -apply plugin: 'elasticsearch.bwc-test' +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask +apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.bwc-test' + dependencies { javaRestTestImplementation project(':x-pack:qa') } // Only run tests for 7.9+, since the node.roles setting was introduced in 7.9.0 -BuildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.9.0") && +buildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.9.0") && v != VersionProperties.getElasticsearchVersion()) { bwcVersion, baseName -> def baseCluster = testClusters.register(baseName) { @@ -54,5 +60,5 @@ tasks.withType(Test).configureEach { classpath = sourceSets.javaRestTest.runtimeClasspath testClassesDirs = sourceSets.javaRestTest.output.classesDirs // Security is explicitly disabled, do not run tests in FIPS mode - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) } diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle index 6d41c4eddf31c..83c231da7529c 100644 --- a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle @@ -1,4 +1,10 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + import org.elasticsearch.gradle.internal.test.RestIntegTestTask import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE @@ -13,7 +19,7 @@ restResources { } // randomise between sniff and proxy modes -boolean proxyMode = BuildParams.random.nextBoolean() +boolean proxyMode = buildParams.random.nextBoolean() def fulfillingCluster = testClusters.register('fulfilling-cluster') { setting 'xpack.security.enabled', 'true' diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-full-license/build.gradle b/x-pack/qa/multi-cluster-search-security/legacy-with-full-license/build.gradle index 69c0e8b20c2c4..6e95d718b19de 100644 --- a/x-pack/qa/multi-cluster-search-security/legacy-with-full-license/build.gradle +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-full-license/build.gradle @@ -1,4 +1,10 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + import org.elasticsearch.gradle.internal.test.RestIntegTestTask import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE @@ -13,7 +19,7 @@ restResources { } // randomise between sniff and proxy modes -boolean proxyMode = BuildParams.random.nextBoolean() +boolean proxyMode = buildParams.random.nextBoolean() def fulfillingCluster = testClusters.register('fulfilling-cluster') { setting 'xpack.security.enabled', 'true' diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-restricted-trust/build.gradle b/x-pack/qa/multi-cluster-search-security/legacy-with-restricted-trust/build.gradle index 1164aa240ee22..5c6235e092458 100644 --- a/x-pack/qa/multi-cluster-search-security/legacy-with-restricted-trust/build.gradle +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-restricted-trust/build.gradle @@ -1,4 +1,10 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + import org.elasticsearch.gradle.internal.test.RestIntegTestTask import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE @@ -23,7 +29,7 @@ tasks.register("copyCerts", Sync) { } // randomise between sniff and proxy modes -boolean proxyMode = BuildParams.random.nextBoolean() +boolean proxyMode = buildParams.random.nextBoolean() def fulfillingCluster = testClusters.register('fulfilling-cluster') { setting 'xpack.security.enabled', 'true' diff --git a/x-pack/qa/oidc-op-tests/build.gradle b/x-pack/qa/oidc-op-tests/build.gradle index 8f46613d5d9f0..43d1cd12cdfb7 100644 --- a/x-pack/qa/oidc-op-tests/build.gradle +++ b/x-pack/qa/oidc-op-tests/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ apply plugin: 'elasticsearch.internal-java-rest-test' @@ -11,5 +16,5 @@ dependencies { tasks.named('javaRestTest') { usesDefaultDistribution() // test suite uses jks which is not supported in fips mode - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) } diff --git a/x-pack/qa/repository-old-versions/build.gradle b/x-pack/qa/repository-old-versions/build.gradle index 1abf6662a1b8b..ecd02ac9d209f 100644 --- a/x-pack/qa/repository-old-versions/build.gradle +++ b/x-pack/qa/repository-old-versions/build.gradle @@ -9,7 +9,6 @@ import org.elasticsearch.gradle.Architecture import org.elasticsearch.gradle.OS import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.BwcVersions -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.AntFixture import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask import org.elasticsearch.gradle.transform.UnzipTransform @@ -98,7 +97,7 @@ if (OS.current() == OS.WINDOWS) { TaskProvider<AntFixture> fixture = tasks.register("oldES${versionNoDots}Fixture", AntFixture) { dependsOn project.configurations.oldesFixture, jdks.legacy, config - executable = "${BuildParams.runtimeJavaHome}/bin/java" + executable = "${buildParams.runtimeJavaHome.get()}/bin/java" env 'CLASSPATH', "${-> project.configurations.oldesFixture.asPath}" // old versions of Elasticsearch need JAVA_HOME env 'JAVA_HOME', jdks.legacy.javaHomePath diff --git a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldMappingsIT.java b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldMappingsIT.java index 67dbdec6b8399..95bc92d4f185a 100644 --- a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldMappingsIT.java +++ b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldMappingsIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.WarningsHandler; @@ -166,7 +167,10 @@ public void setupIndex() throws IOException { createRestoreRequest.addParameter("wait_for_completion", "true"); createRestoreRequest.setJsonEntity("{\"indices\":\"" + indices.stream().collect(Collectors.joining(",")) + "\"}"); createRestoreRequest.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE)); - assertOK(client().performRequest(createRestoreRequest)); + Response response = client().performRequest(createRestoreRequest); + // check deprecation warning for "_field_name" disabling + assertTrue(response.getWarnings().stream().filter(s -> s.contains("Disabling _field_names is not necessary")).count() > 0); + assertOK(response); } private Request createIndex(String indexName, String file) throws IOException { diff --git a/x-pack/qa/repository-old-versions/src/test/resources/org/elasticsearch/oldrepos/custom.json b/x-pack/qa/repository-old-versions/src/test/resources/org/elasticsearch/oldrepos/custom.json index ae52ccbcce330..ad1c6b0dc59ae 100644 --- a/x-pack/qa/repository-old-versions/src/test/resources/org/elasticsearch/oldrepos/custom.json +++ b/x-pack/qa/repository-old-versions/src/test/resources/org/elasticsearch/oldrepos/custom.json @@ -1,4 +1,7 @@ "_default_": { + "_field_names": { + "enabled": false + }, "properties": { "apache2": {
"properties": { diff --git a/x-pack/qa/rolling-upgrade-basic/build.gradle b/x-pack/qa/rolling-upgrade-basic/build.gradle index a7ea1695c477a..9a447f35eb13c 100644 --- a/x-pack/qa/rolling-upgrade-basic/build.gradle +++ b/x-pack/qa/rolling-upgrade-basic/build.gradle @@ -1,4 +1,10 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-testclusters' @@ -9,7 +15,7 @@ dependencies { testImplementation project(':x-pack:qa') } -BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> def baseCluster = testClusters.register(baseName) { testDistribution = "DEFAULT" versions = [bwcVersion.toString(), project.version] @@ -78,5 +84,5 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> // Security is explicitly disabled, do not run tests in FIPS mode tasks.withType(Test).configureEach { - enabled = BuildParams.inFipsJvm == false -} \ No newline at end of file + enabled = buildParams.inFipsJvm == false +} diff --git a/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle b/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle index 969ba23e19254..ebcb4cd9760fe 100644 --- a/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle +++ b/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle @@ -1,4 +1,10 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-testclusters' @@ -9,7 +15,7 @@ dependencies { testImplementation project(':x-pack:qa') } -BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> def baseLeaderCluster = testClusters.register("${baseName}-leader") { numberOfNodes = 3 @@ -92,5 +98,5 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> // Security is explicitly disabled, do not run tests in FIPS mode tasks.withType(Test).configureEach { - BuildParams.withFipsEnabledOnly(it) + buildParams.withFipsEnabledOnly(it) } diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index 271aadfe4b388..2049ccb5d9cc8 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -1,5 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + import org.elasticsearch.gradle.internal.BwcVersions -import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-testclusters' @@ -31,7 +37,7 @@ tasks.register("copyTestNodeKeyMaterial", Copy) { into outputDir } -BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> +buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> String oldVersion = bwcVersion.toString() // SearchableSnapshotsRollingUpgradeIT uses a specific repository to not interfere with other tests diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/ApiKeyBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/ApiKeyBackwardsCompatibilityIT.java index 8a775c7f7d3d8..25e8aed73bda2 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/ApiKeyBackwardsCompatibilityIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/ApiKeyBackwardsCompatibilityIT.java @@ -21,7 +21,6 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.test.rest.ObjectPath; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.transport.RemoteClusterPortSettings; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.authc.Authentication; @@ -118,20 +117,18 @@ public void testCreatingAndUpdatingApiKeys() throws Exception { ); RestClient client = client(); - if (isUpdateApiSupported(client)) { - var updateException = expectThrows( - Exception.class, - () -> updateOrBulkUpdateApiKey(client, apiKey.v1(), randomRoleDescriptors(true)) - ); + var updateException = expectThrows( + Exception.class, + () -> updateOrBulkUpdateApiKey(client, apiKey.v1(), randomRoleDescriptors(true)) + ); - assertThat( - updateException.getMessage(), - anyOf( - containsString("failed to parse role [my_role]. unexpected field [remote_indices]"), - containsString("remote indices not supported for API keys") - ) - ); - } + assertThat( + updateException.getMessage(), + anyOf( + containsString("failed to parse role [my_role]. unexpected field [remote_indices]"), + containsString("remote indices not supported for API keys") + ) + ); } case MIXED -> { try { @@ -145,20 +142,18 @@ public void testCreatingAndUpdatingApiKeys() throws Exception { // fail when remote_indices are provided: // against old node - if (isUpdateApiSupported(oldVersionClient)) { - Exception e = expectThrows( - Exception.class, - () -> updateOrBulkUpdateApiKey(oldVersionClient, apiKey.v1(), randomRoleDescriptors(true)) - ); - assertThat( - e.getMessage(), - anyOf( - containsString("failed to parse role [my_role]. unexpected field [remote_indices]"), - containsString("remote indices not supported for API keys") - ) - ); - } - Exception e = expectThrows(Exception.class, () -> createOrGrantApiKey(oldVersionClient, randomRoleDescriptors(true))); + Exception e = expectThrows( + Exception.class, + () -> updateOrBulkUpdateApiKey(oldVersionClient, apiKey.v1(), randomRoleDescriptors(true)) + ); + assertThat( + e.getMessage(), + anyOf( + containsString("failed to parse role [my_role]. 
unexpected field [remote_indices]"), + containsString("remote indices not supported for API keys") + ) + ); + e = expectThrows(Exception.class, () -> createOrGrantApiKey(oldVersionClient, randomRoleDescriptors(true))); assertThat( e.getMessage(), anyOf( @@ -263,28 +258,9 @@ private void updateOrBulkUpdateApiKey(String id, String roles) throws IOExceptio updateOrBulkUpdateApiKey(client(), id, roles); } - private boolean isUpdateApiSupported(RestClient client) { - return switch (CLUSTER_TYPE) { - case OLD -> clusterHasFeature(RestTestLegacyFeatures.SECURITY_UPDATE_API_KEY); // Update API was introduced in 8.4.0. - case MIXED -> clusterHasFeature(RestTestLegacyFeatures.SECURITY_UPDATE_API_KEY) || client == newVersionClient; - case UPGRADED -> true; - }; - } - - private boolean isBulkUpdateApiSupported(RestClient client) { - return switch (CLUSTER_TYPE) { - case OLD -> clusterHasFeature(RestTestLegacyFeatures.SECURITY_BULK_UPDATE_API_KEY); // Bulk update API was introduced in 8.5.0. - case MIXED -> clusterHasFeature(RestTestLegacyFeatures.SECURITY_BULK_UPDATE_API_KEY) || client == newVersionClient; - case UPGRADED -> true; - }; - } - private void updateOrBulkUpdateApiKey(RestClient client, String id, String roles) throws IOException { - if (false == isUpdateApiSupported(client)) { - return; // Update API is not supported. - } final Request updateApiKeyRequest; - final boolean bulkUpdate = randomBoolean() && isBulkUpdateApiSupported(client); + final boolean bulkUpdate = randomBoolean(); if (bulkUpdate) { updateApiKeyRequest = new Request("POST", "_security/api_key/_bulk_update"); updateApiKeyRequest.setJsonEntity(org.elasticsearch.common.Strings.format(""" diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java index 74165eeb07b8a..aa166311f6465 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; @@ -83,11 +82,7 @@ public void testMlAssignmentPlannerUpgrade() throws Exception { // assert correct memory format is used assertOldMemoryFormat("old_memory_format"); - if (clusterHasFeature(RestTestLegacyFeatures.ML_NEW_MEMORY_FORMAT)) { - assertNewMemoryFormat("new_memory_format"); - } else { - assertOldMemoryFormat("new_memory_format"); - } + assertNewMemoryFormat("new_memory_format"); } case MIXED -> { ensureHealth(".ml-inference-*,.ml-config*", (request -> { @@ -99,12 +94,7 @@ public void testMlAssignmentPlannerUpgrade() throws Exception { // assert correct memory format is used assertOldMemoryFormat("old_memory_format"); - if (clusterHasFeature(RestTestLegacyFeatures.ML_NEW_MEMORY_FORMAT)) { - assertNewMemoryFormat("new_memory_format"); - } else { - assertOldMemoryFormat("new_memory_format"); - } - + assertNewMemoryFormat("new_memory_format"); } case UPGRADED -> { ensureHealth(".ml-inference-*,.ml-config*", (request -> { @@ -137,14 +127,12 @@ private void waitForDeploymentStarted(String modelId) throws Exception { @SuppressWarnings("unchecked") private void assertOldMemoryFormat(String modelId) 
throws Exception { - // There was a change in the MEMORY_OVERHEAD value in 8.3.0, see #86416 - long memoryOverheadMb = clusterHasFeature(RestTestLegacyFeatures.ML_MEMORY_OVERHEAD_FIXED) ? 240 : 270; var response = getTrainedModelStats(modelId); Map<String, Object> map = entityAsMap(response); List<Map<String, Object>> stats = (List<Map<String, Object>>) map.get("trained_model_stats"); assertThat(stats, hasSize(1)); var stat = stats.get(0); - Long expectedMemoryUsage = ByteSizeValue.ofMb(memoryOverheadMb).getBytes() + RAW_MODEL_SIZE * 2; + Long expectedMemoryUsage = ByteSizeValue.ofMb(240).getBytes() + RAW_MODEL_SIZE * 2; Integer actualMemoryUsage = (Integer) XContentMapValues.extractValue("model_size_stats.required_native_memory_bytes", stat); assertThat( Strings.format("Memory usage mismatch for the model %s in cluster state %s", modelId, CLUSTER_TYPE.toString()), diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TransportVersionClusterStateUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TransportVersionClusterStateUpgradeIT.java deleted file mode 100644 index e864a579bd0b0..0000000000000 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TransportVersionClusterStateUpgradeIT.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.upgrades; - -import org.elasticsearch.Build; -import org.elasticsearch.TransportVersion; -import org.elasticsearch.client.Request; -import org.elasticsearch.common.util.Maps; -import org.elasticsearch.test.rest.ObjectPath; -import org.elasticsearch.test.rest.RestTestLegacyFeatures; - -import java.util.Map; - -import static org.elasticsearch.cluster.ClusterState.INFERRED_TRANSPORT_VERSION; -import static org.elasticsearch.cluster.ClusterState.VERSION_INTRODUCING_TRANSPORT_VERSIONS; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.everyItem; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.oneOf; - -public class TransportVersionClusterStateUpgradeIT extends AbstractUpgradeTestCase { - - public void testReadsInferredTransportVersions() throws Exception { - // waitUntil because the versions fixup on upgrade happens in the background so may need a retry - assertTrue(waitUntil(() -> { - try { - // check several responses in order to sample from a selection of nodes - for (int i = getClusterHosts().size(); i > 0; i--) { - if (runTransportVersionsTest() == false) { - return false; - } - } - return true; - } catch (Exception e) { - throw new AssertionError(e); - } - })); - } - - private boolean runTransportVersionsTest() throws Exception { - final var clusterState = ObjectPath.createFromResponse( - client().performRequest(new Request("GET", "/_cluster/state" + randomFrom("", "/nodes") + randomFrom("", "?local"))) - ); - final var description = clusterState.toString(); - - final var nodeIds = clusterState.evaluateMapKeys("nodes"); - final Map<String, String> versionsByNodeId = Maps.newHashMapWithExpectedSize(nodeIds.size()); - for (final var nodeId : nodeIds) { - versionsByNodeId.put(nodeId, clusterState.evaluate("nodes."
+ nodeId + ".version")); - } - - final var hasTransportVersions = clusterState.evaluate("transport_versions") != null; - final var hasNodesVersions = clusterState.evaluate("nodes_versions") != null; - assertFalse(description, hasNodesVersions && hasTransportVersions); - - switch (CLUSTER_TYPE) { - case OLD -> { - if (clusterHasFeature(RestTestLegacyFeatures.TRANSPORT_VERSION_SUPPORTED) == false) { - // Before 8.8.0 there was only DiscoveryNode#version - assertFalse(description, hasTransportVersions); - assertFalse(description, hasNodesVersions); - } else if (clusterHasFeature(RestTestLegacyFeatures.STATE_REPLACED_TRANSPORT_VERSION_WITH_NODES_VERSION) == false) { - // In [8.8.0, 8.11.0) we exposed just transport_versions - assertTrue(description, hasTransportVersions); - assertFalse(description, hasNodesVersions); - } else { - // From 8.11.0 onwards we exposed nodes_versions - assertFalse(description, hasTransportVersions); - assertTrue(description, hasNodesVersions); - } - } - case MIXED -> { - if (clusterHasFeature(RestTestLegacyFeatures.TRANSPORT_VERSION_SUPPORTED) == false) { - // Responding node might be <8.8.0 (so no extra versions) or >=8.11.0 (includes nodes_versions) - assertFalse(description, hasTransportVersions); - } else if (clusterHasFeature(RestTestLegacyFeatures.STATE_REPLACED_TRANSPORT_VERSION_WITH_NODES_VERSION) == false) { - // Responding node might be in [8.8.0, 8.11.0) (transport_versions) or >=8.11.0 (includes nodes_versions) but not both - assertTrue(description, hasNodesVersions || hasTransportVersions); - } else { - // Responding node is ≥8.11.0 so has nodes_versions for sure - assertFalse(description, hasTransportVersions); - assertTrue(description, hasNodesVersions); - } - } - case UPGRADED -> { - // All nodes are Version.CURRENT, ≥8.11.0, so we definitely have nodes_versions - assertFalse(description, hasTransportVersions); - assertTrue(description, hasNodesVersions); - assertThat(description, versionsByNodeId.values(), everyItem(equalTo(Build.current().version()))); - } - } - - if (hasTransportVersions) { - // Upgrading from [8.8.0, 8.11.0) and the responding node is still on the old version - assertFalse(description, clusterHasFeature(RestTestLegacyFeatures.STATE_REPLACED_TRANSPORT_VERSION_WITH_NODES_VERSION)); - assertTrue(description, clusterHasFeature(RestTestLegacyFeatures.TRANSPORT_VERSION_SUPPORTED)); - assertNotEquals(description, ClusterType.UPGRADED, CLUSTER_TYPE); - - // transport_versions includes the correct version for all nodes, no inference is needed - assertEquals(description, nodeIds.size(), clusterState.evaluateArraySize("transport_versions")); - for (int i = 0; i < nodeIds.size(); i++) { - final var path = "transport_versions." 
+ i; - final String nodeId = clusterState.evaluate(path + ".node_id"); - final var nodeDescription = nodeId + "/" + description; - final var transportVersion = TransportVersion.fromString(clusterState.evaluate(path + ".transport_version")); - final var nodeVersion = versionsByNodeId.get(nodeId); - assertNotNull(nodeDescription, nodeVersion); - if (nodeVersion.equals(Build.current().version())) { - assertEquals(nodeDescription, TransportVersion.current(), transportVersion); - } else { - // There's no relationship between node versions and transport versions anymore, although we can be sure of this: - assertThat(nodeDescription, transportVersion, greaterThanOrEqualTo(INFERRED_TRANSPORT_VERSION)); - } - } - } else if (hasNodesVersions) { - // Either upgrading from ≥8.11.0 (the responding node might be old or new), or from <8.8.0 (the responding node is new) - assertFalse( - description, - clusterHasFeature(RestTestLegacyFeatures.STATE_REPLACED_TRANSPORT_VERSION_WITH_NODES_VERSION) == false - && CLUSTER_TYPE == ClusterType.OLD - ); - - // nodes_versions includes _a_ version for all nodes; it might be correct, or it might be inferred if we're upgrading from - // <8.8.0 and the master is still an old node or the TransportVersionsFixupListener hasn't run yet - assertEquals(description, nodeIds.size(), clusterState.evaluateArraySize("nodes_versions")); - for (int i = 0; i < nodeIds.size(); i++) { - final var path = "nodes_versions." + i; - final String nodeId = clusterState.evaluate(path + ".node_id"); - final var nodeDescription = nodeId + "/" + description; - final var transportVersion = TransportVersion.fromString(clusterState.evaluate(path + ".transport_version")); - final var nodeVersion = versionsByNodeId.get(nodeId); - assertNotNull(nodeDescription, nodeVersion); - if (nodeVersion.equals(Build.current().version())) { - // Either the responding node is upgraded or the upgrade is trivial; if the responding node is upgraded but the master - // is not then its transport version may be temporarily inferred as 8.8.0 until TransportVersionsFixupListener runs. - assertThat( - nodeDescription, - transportVersion, - clusterHasFeature(RestTestLegacyFeatures.TRANSPORT_VERSION_SUPPORTED) - ? equalTo(TransportVersion.current()) - : oneOf(TransportVersion.current(), INFERRED_TRANSPORT_VERSION) - ); - if (CLUSTER_TYPE == ClusterType.UPGRADED && transportVersion.equals(INFERRED_TRANSPORT_VERSION)) { - // TransportVersionsFixupListener should run soon, retry - logger.info("{} - not fixed up yet, retrying", nodeDescription); - return false; - } - } else { - var version = parseLegacyVersion(nodeVersion); - // All non-semantic versions are after 8.8.0 and have transport version - var transportVersionIntroduced = version.map(v -> v.after(VERSION_INTRODUCING_TRANSPORT_VERSIONS)).orElse(true); - if (transportVersionIntroduced) { - // There's no relationship between node versions and transport versions anymore, although we can be sure of this: - assertThat(nodeDescription, transportVersion, greaterThan(INFERRED_TRANSPORT_VERSION)); - } else { - // Responding node is not upgraded, and no later than 8.8.0, so we infer its version correctly. 
- assertEquals(nodeDescription, TransportVersion.fromId(version.get().id()), transportVersion); - } - } - } - } - - return true; - } -} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java index d7d2676163851..0c57baad1a09b 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java @@ -27,7 +27,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.test.rest.RestTestLegacyFeatures.COMPONENT_TEMPLATE_SUPPORTED; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -43,12 +42,7 @@ public class UpgradeClusterClientYamlTestSuiteIT extends ESClientYamlSuiteTestCa public void waitForTemplates() throws Exception { if (AbstractUpgradeTestCase.CLUSTER_TYPE == AbstractUpgradeTestCase.ClusterType.OLD) { try { - boolean clusterUnderstandsComposableTemplates = clusterHasFeature(COMPONENT_TEMPLATE_SUPPORTED); - XPackRestTestHelper.waitForTemplates( - client(), - XPackRestTestConstants.ML_POST_V7120_TEMPLATES, - clusterUnderstandsComposableTemplates - ); + XPackRestTestHelper.waitForTemplates(client(), XPackRestTestConstants.ML_POST_V7120_TEMPLATES); } catch (AssertionError e) { throw new AssertionError("Failure in test setup: Failed to initialize ML index templates", e); } diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle index da2d095c001d4..461ebc4beb443 100644 --- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle +++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle @@ -1,6 +1,11 @@ -import org.apache.tools.ant.filters.ReplaceTokens +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ -import org.elasticsearch.gradle.internal.info.BuildParams +import org.apache.tools.ant.filters.ReplaceTokens apply plugin: 'elasticsearch.legacy-yaml-rest-test' diff --git a/x-pack/qa/smoke-test-plugins/build.gradle b/x-pack/qa/smoke-test-plugins/build.gradle index 427aa39f02e49..a51a67dd75b8a 100644 --- a/x-pack/qa/smoke-test-plugins/build.gradle +++ b/x-pack/qa/smoke-test-plugins/build.gradle @@ -1,5 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + import org.apache.tools.ant.filters.ReplaceTokens -import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.rest-resources' diff --git a/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java b/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java index 31b74b8706877..88b01defb1a86 100644 --- a/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java +++ b/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.json.JsonXContent; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; @@ -34,32 +33,27 @@ private XPackRestTestHelper() {} * @throws InterruptedException If the wait is interrupted */ @SuppressWarnings("unchecked") - public static void waitForTemplates(RestClient client, List<String> expectedTemplates, boolean clusterUnderstandsComposableTemplates) - throws Exception { + public static void waitForTemplates(RestClient client, List<String> expectedTemplates) throws Exception { // TODO: legacy support can be removed once all X-Pack plugins use only composable // templates in the oldest version we test upgrades from assertBusy(() -> { Map<String, Object> response; - if (clusterUnderstandsComposableTemplates) { - final Request request = new Request("GET", "_index_template"); - request.addParameter("error_trace", "true"); + final Request request = new Request("GET", "_index_template"); + request.addParameter("error_trace", "true"); - String string = EntityUtils.toString(client.performRequest(request).getEntity()); - List<Map<String, Object>> templateList = (List<Map<String, Object>>) XContentHelper.convertToMap( - JsonXContent.jsonXContent, - string, - false - ).get("index_templates"); - response = templateList.stream().collect(Collectors.toMap(m -> (String) m.get("name"), m -> m.get("index_template"))); - } else { - response = Collections.emptyMap(); - } + String string = EntityUtils.toString(client.performRequest(request).getEntity()); + List<Map<String, Object>> templateList = (List<Map<String, Object>>) XContentHelper.convertToMap( + JsonXContent.jsonXContent, + string, + false + ).get("index_templates"); + response = templateList.stream().collect(Collectors.toMap(m -> (String) m.get("name"), m -> m.get("index_template"))); final Set<String> templates = new TreeSet<>(response.keySet()); final Request legacyRequest = new Request("GET", "_template"); legacyRequest.addParameter("error_trace", "true"); - String string = EntityUtils.toString(client.performRequest(legacyRequest).getEntity()); + string = EntityUtils.toString(client.performRequest(legacyRequest).getEntity()); Map<String, Object> legacyResponse = XContentHelper.convertToMap(JsonXContent.jsonXContent, string, false); final Set<String> legacyTemplates = new TreeSet<>(legacyResponse.keySet()); diff --git a/x-pack/qa/third-party/jira/build.gradle b/x-pack/qa/third-party/jira/build.gradle index b7268af807535..7e3d0485545a6 100644 --- a/x-pack/qa/third-party/jira/build.gradle +++ b/x-pack/qa/third-party/jira/build.gradle @@ -1,10 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + import groovy.json.JsonSlurper import javax.net.ssl.HttpsURLConnection import java.nio.charset.StandardCharsets -import org.elasticsearch.gradle.internal.info.BuildParams - apply plugin: 'elasticsearch.legacy-yaml-rest-test' dependencies { @@ -55,7 +60,7 @@ if (!jiraUrl && !jiraUser && !jiraPassword && !jiraProject) { tasks.named("yamlRestTest").configure { finalizedBy "cleanJira" } } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // Test clusters run with security disabled tasks.named("yamlRestTest").configure{ enabled = false } } diff --git a/x-pack/qa/third-party/pagerduty/build.gradle b/x-pack/qa/third-party/pagerduty/build.gradle index 4b5a0bbeeeb4a..20f7b9b654b66 100644 --- a/x-pack/qa/third-party/pagerduty/build.gradle +++ b/x-pack/qa/third-party/pagerduty/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ apply plugin: 'elasticsearch.legacy-yaml-rest-test' @@ -28,7 +33,7 @@ if (!pagerDutyServiceKey) { } } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // Test clusters run with security disabled tasks.named("yamlRestTest").configure{enabled = false } } diff --git a/x-pack/qa/third-party/slack/build.gradle b/x-pack/qa/third-party/slack/build.gradle index b2b0478da0471..54821a9d2b71a 100644 --- a/x-pack/qa/third-party/slack/build.gradle +++ b/x-pack/qa/third-party/slack/build.gradle @@ -1,4 +1,9 @@ -import org.elasticsearch.gradle.internal.info.BuildParams +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ apply plugin: 'elasticsearch.legacy-yaml-rest-test' @@ -28,7 +33,7 @@ if (!slackUrl) { } } -if (BuildParams.inFipsJvm){ +if (buildParams.inFipsJvm){ // Test clusters run with security disabled tasks.named("yamlRestTest").configure{enabled = false } }